| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2017-03-19 02:28:41 +08:00
										 |  |  |  * Minio Cloud Storage, (C) 2014, 2015, 2016, 2017 Minio, Inc. | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Licensed under the Apache License, Version 2.0 (the "License"); | 
					
						
							|  |  |  |  * you may not use this file except in compliance with the License. | 
					
						
							|  |  |  |  * You may obtain a copy of the License at | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Unless required by applicable law or agreed to in writing, software | 
					
						
							|  |  |  |  * distributed under the License is distributed on an "AS IS" BASIS, | 
					
						
							|  |  |  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
					
						
							|  |  |  |  * See the License for the specific language governing permissions and | 
					
						
							|  |  |  |  * limitations under the License. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | package cmd | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | 	"encoding/json" | 
					
						
							|  |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2017-03-03 06:21:30 +08:00
										 |  |  | 	"fmt" | 
					
						
							| 
									
										
										
										
											2017-04-12 06:44:27 +08:00
										 |  |  | 	"net" | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 	"os" | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | 	"path" | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 	"path/filepath" | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | 	"reflect" | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 	"sort" | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | 	"sync" | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | 	"time" | 
					
						
							| 
									
										
										
										
											2017-04-12 06:44:27 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	"github.com/minio/minio-go/pkg/set" | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
// Names of the admin RPC service methods, as registered with the RPC
// server. Each constant is the "Service.Method" string passed to Call.
const (
	// Admin service names
	serviceRestartRPC = "Admin.Restart"
	listLocksRPC      = "Admin.ListLocks"
	reInitDisksRPC    = "Admin.ReInitDisks"
	serverInfoDataRPC = "Admin.ServerInfoData"
	getConfigRPC      = "Admin.GetConfig"
	writeTmpConfigRPC = "Admin.WriteTmpConfig"
	commitConfigRPC   = "Admin.CommitConfig"
)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
// localAdminClient - represents admin operation to be executed locally.
// It is stateless; its methods act directly on this process's global
// state instead of going over RPC.
type localAdminClient struct {
}
					
						
							|  |  |  | 
 | 
					
						
// remoteAdminClient - represents admin operation to be executed
// remotely, via RPC. The embedded AuthRPCClient supplies the
// authenticated Call method used by every operation.
type remoteAdminClient struct {
	*AuthRPCClient
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
// adminCmdRunner - abstracts local and remote execution of admin
// commands like service stop and service restart.
type adminCmdRunner interface {
	// Restart triggers a restart of the server process.
	Restart() error
	// ListLocks returns lock information for locks held on
	// bucket/prefix for longer than duration.
	ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
	// ReInitDisks reinitializes storage disks and the object layer.
	ReInitDisks() error
	// ServerInfoData returns storage, connection, HTTP and version
	// information for the server.
	ServerInfoData() (ServerInfoData, error)
	// GetConfig returns the server's config.json serialized as bytes.
	GetConfig() ([]byte, error)
	// WriteTmpConfig writes configBytes to a temporary file named
	// tmpFileName in the config directory.
	WriteTmpConfig(tmpFileName string, configBytes []byte) error
	// CommitConfig moves the temporary config file onto config.json.
	CommitConfig(tmpFileName string) error
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Restart - Sends a message over channel to the go-routine
 | 
					
						
							|  |  |  | // responsible for restarting the process.
 | 
					
						
							|  |  |  | func (lc localAdminClient) Restart() error { | 
					
						
							|  |  |  | 	globalServiceSignalCh <- serviceRestart | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | // ListLocks - Fetches lock information from local lock instrumentation.
 | 
					
						
							| 
									
										
										
										
											2017-02-02 03:17:30 +08:00
										 |  |  | func (lc localAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) { | 
					
						
							|  |  |  | 	return listLocksInfo(bucket, prefix, duration), nil | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | // Restart - Sends restart command to remote server via RPC.
 | 
					
						
							|  |  |  | func (rc remoteAdminClient) Restart() error { | 
					
						
							| 
									
										
										
										
											2016-12-23 23:12:19 +08:00
										 |  |  | 	args := AuthRPCArgs{} | 
					
						
							|  |  |  | 	reply := AuthRPCReply{} | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 	return rc.Call(serviceRestartRPC, &args, &reply) | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // ListLocks - Sends list locks command to remote server via RPC.
 | 
					
						
							| 
									
										
										
										
											2017-02-02 03:17:30 +08:00
										 |  |  | func (rc remoteAdminClient) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) { | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | 	listArgs := ListLocksQuery{ | 
					
						
							| 
									
										
										
										
											2017-02-02 03:17:30 +08:00
										 |  |  | 		bucket:   bucket, | 
					
						
							|  |  |  | 		prefix:   prefix, | 
					
						
							|  |  |  | 		duration: duration, | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	var reply ListLocksReply | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 	if err := rc.Call(listLocksRPC, &listArgs, &reply); err != nil { | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | 		return nil, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return reply.volLocks, nil | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-23 16:32:55 +08:00
// ReInitDisks - There is nothing to do here, heal format REST API
// handler has already formatted and reinitialized the local disks.
func (lc localAdminClient) ReInitDisks() error {
	return nil
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // ReInitDisks - Signals peers via RPC to reinitialize their disks and
 | 
					
						
							|  |  |  | // object layer.
 | 
					
						
							|  |  |  | func (rc remoteAdminClient) ReInitDisks() error { | 
					
						
							|  |  |  | 	args := AuthRPCArgs{} | 
					
						
							|  |  |  | 	reply := AuthRPCReply{} | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 	return rc.Call(reInitDisksRPC, &args, &reply) | 
					
						
							| 
									
										
										
										
											2017-01-23 16:32:55 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-04-21 22:15:53 +08:00
// ServerInfoData - Returns the server info of this server.
// Fails with errServerNotInitialized until both the boot time is
// recorded and the object layer is up.
func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
	// A zero boot time means server startup hasn't completed yet.
	if globalBootTime.IsZero() {
		return sid, errServerNotInitialized
	}

	// Build storage info
	objLayer := newObjectLayerFn()
	if objLayer == nil {
		return sid, errServerNotInitialized
	}
	storage := objLayer.StorageInfo()

	// Collect ARNs of all configured external notification targets.
	var arns []string
	for queueArn := range globalEventNotifier.GetAllExternalTargets() {
		arns = append(arns, queueArn)
	}

	return ServerInfoData{
		StorageInfo: storage,
		ConnStats:   globalConnStats.toServerConnStats(),
		HTTPStats:   globalHTTPStats.toServerHTTPStats(),
		Properties: ServerProperties{
			Uptime:   UTCNow().Sub(globalBootTime),
			Version:  Version,
			CommitID: CommitID,
			SQSARN:   arns,
			Region:   serverConfig.GetRegion(),
		},
	}, nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-04-21 22:15:53 +08:00
										 |  |  | // ServerInfo - returns the server info of the server to which the RPC call is made.
 | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) { | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 	args := AuthRPCArgs{} | 
					
						
							| 
									
										
										
										
											2017-04-21 22:15:53 +08:00
										 |  |  | 	reply := ServerInfoDataReply{} | 
					
						
							|  |  |  | 	err := rc.Call(serverInfoDataRPC, &args, &reply) | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | 		return sid, err | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-04-21 22:15:53 +08:00
										 |  |  | 	return reply.ServerInfoData, nil | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | // GetConfig - returns config.json of the local server.
 | 
					
						
							|  |  |  | func (lc localAdminClient) GetConfig() ([]byte, error) { | 
					
						
							|  |  |  | 	if serverConfig == nil { | 
					
						
							|  |  |  | 		return nil, errors.New("config not present") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return json.Marshal(serverConfig) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // GetConfig - returns config.json of the remote server.
 | 
					
						
							|  |  |  | func (rc remoteAdminClient) GetConfig() ([]byte, error) { | 
					
						
							|  |  |  | 	args := AuthRPCArgs{} | 
					
						
							|  |  |  | 	reply := ConfigReply{} | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 	if err := rc.Call(getConfigRPC, &args, &reply); err != nil { | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | 		return nil, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return reply.Config, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
// WriteTmpConfig - writes config file content to a temporary file on
// the local server. Delegates to the shared helper also used by the
// RPC server-side handler.
func (lc localAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error {
	return writeTmpConfigCommon(tmpFileName, configBytes)
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // WriteTmpConfig - writes config file content to a temporary file on
 | 
					
						
							|  |  |  | // a remote node.
 | 
					
						
							|  |  |  | func (rc remoteAdminClient) WriteTmpConfig(tmpFileName string, configBytes []byte) error { | 
					
						
							|  |  |  | 	wArgs := WriteConfigArgs{ | 
					
						
							|  |  |  | 		TmpFileName: tmpFileName, | 
					
						
							|  |  |  | 		Buf:         configBytes, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	err := rc.Call(writeTmpConfigRPC, &wArgs, &WriteConfigReply{}) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		errorIf(err, "Failed to write temporary config file.") | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // CommitConfig - Move the new config in tmpFileName onto config.json
 | 
					
						
							|  |  |  | // on a local node.
 | 
					
						
							|  |  |  | func (lc localAdminClient) CommitConfig(tmpFileName string) error { | 
					
						
							| 
									
										
										
										
											2017-03-03 06:21:30 +08:00
										 |  |  | 	configFile := getConfigFile() | 
					
						
							|  |  |  | 	tmpConfigFile := filepath.Join(getConfigDir(), tmpFileName) | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-03-03 06:21:30 +08:00
										 |  |  | 	err := os.Rename(tmpConfigFile, configFile) | 
					
						
							|  |  |  | 	errorIf(err, fmt.Sprintf("Failed to rename %s to %s", tmpConfigFile, configFile)) | 
					
						
							|  |  |  | 	return err | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // CommitConfig - Move the new config in tmpFileName onto config.json
 | 
					
						
							|  |  |  | // on a remote node.
 | 
					
						
							|  |  |  | func (rc remoteAdminClient) CommitConfig(tmpFileName string) error { | 
					
						
							|  |  |  | 	cArgs := CommitConfigArgs{ | 
					
						
							|  |  |  | 		FileName: tmpFileName, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	cReply := CommitConfigReply{} | 
					
						
							|  |  |  | 	err := rc.Call(commitConfigRPC, &cArgs, &cReply) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		errorIf(err, "Failed to rename config file.") | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 06:48:52 +08:00
// adminPeer - represents an entity that implements Restart methods.
type adminPeer struct {
	addr      string         // network address of the peer (host:port)
	cmdRunner adminCmdRunner // local or remote executor for admin commands
}
					
						
							|  |  |  | 
 | 
					
						
// type alias for a collection of adminPeer.
type adminPeers []adminPeer
					
						
							|  |  |  | 
 | 
					
						
// makeAdminPeers - helper function to construct a collection of adminPeer.
// The local peer is always placed first in the returned list; callers
// elsewhere in this file rely on that ordering (e.g. peers[0] is local).
func makeAdminPeers(endpoints EndpointList) (adminPeerList adminPeers) {
	// When no host was specified on the command line, address the local
	// peer as localhost:port.
	thisPeer := globalMinioAddr
	if globalMinioHost == "" {
		thisPeer = net.JoinHostPort("localhost", globalMinioPort)
	}
	adminPeerList = append(adminPeerList, adminPeer{
		thisPeer,
		localAdminClient{},
	})

	// Track hosts already added so each remote peer appears only once.
	hostSet := set.CreateStringSet(globalMinioAddr)
	cred := serverConfig.GetCredential()
	serviceEndpoint := path.Join(minioReservedBucketPath, adminPath)
	for _, host := range GetRemotePeers(endpoints) {
		if hostSet.Contains(host) {
			continue
		}
		hostSet.Add(host)
		adminPeerList = append(adminPeerList, adminPeer{
			addr: host,
			cmdRunner: &remoteAdminClient{newAuthRPCClient(authConfig{
				accessKey:       cred.AccessKey,
				secretKey:       cred.SecretKey,
				serverAddr:      host,
				serviceEndpoint: serviceEndpoint,
				secureConn:      globalIsSSL,
				serviceName:     "Admin",
			})},
		})
	}

	return adminPeerList
}
					
						
							|  |  |  | 
 | 
					
						
// Initialize global adminPeer collection.
// Must be called once during server startup, before any admin
// operation fans out to globalAdminPeers.
func initGlobalAdminPeers(endpoints EndpointList) {
	globalAdminPeers = makeAdminPeers(endpoints)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 06:48:52 +08:00
										 |  |  | // invokeServiceCmd - Invoke Restart command.
 | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | func invokeServiceCmd(cp adminPeer, cmd serviceSignal) (err error) { | 
					
						
							|  |  |  | 	switch cmd { | 
					
						
							|  |  |  | 	case serviceRestart: | 
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | 		err = cp.cmdRunner.Restart() | 
					
						
							| 
									
										
										
										
											2016-12-16 14:26:15 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return err | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 06:48:52 +08:00
// sendServiceCmd - Invoke Restart command on remote peers
// adminPeer followed by on the local peer.
func sendServiceCmd(cps adminPeers, cmd serviceSignal) {
	// Send service command like stop or restart to all remote nodes and finally run on local node.
	errs := make([]error, len(cps))
	var wg sync.WaitGroup
	// cps[0] is the local peer; signal every remote peer concurrently first.
	remotePeers := cps[1:]
	for i := range remotePeers {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			// we use idx+1 because remotePeers slice is 1 position shifted w.r.t cps
			errs[idx+1] = invokeServiceCmd(remotePeers[idx], cmd)
		}(i)
	}
	wg.Wait()
	// The local peer is signaled last, after all remotes have been
	// reached, so this process stays up to complete the fan-out.
	errs[0] = invokeServiceCmd(cps[0], cmd)
	// NOTE(review): collected errors are discarded — the command is
	// best-effort; consider logging failures per peer.
}
					
						
							| 
									
										
										
										
											2017-01-04 15:39:22 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-23 16:32:55 +08:00
// listPeerLocksInfo - fetch list of locks held on the given bucket,
// matching prefix held longer than duration from all peer servers.
// Returns the aggregated lock information, grouped by (bucket, object),
// or an error when a read quorum of peers could not be reached.
func listPeerLocksInfo(peers adminPeers, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
	// Used to aggregate volume lock information from all nodes.
	allLocks := make([][]VolumeLockInfo, len(peers))
	errs := make([]error, len(peers))
	var wg sync.WaitGroup
	localPeer := peers[0]
	remotePeers := peers[1:]
	// Query all remote peers concurrently; results/errors land in the
	// slot matching each peer's position in `peers`.
	for i, remotePeer := range remotePeers {
		wg.Add(1)
		go func(idx int, remotePeer adminPeer) {
			defer wg.Done()
			// `remotePeers` is right-shifted by one position relative to `peers`
			allLocks[idx], errs[idx] = remotePeer.cmdRunner.ListLocks(bucket, prefix, duration)
		}(i+1, remotePeer)
	}
	wg.Wait()
	// Query the local peer synchronously into slot 0.
	allLocks[0], errs[0] = localPeer.cmdRunner.ListLocks(bucket, prefix, duration)

	// Summarizing errors received for ListLocks RPC across all
	// nodes.  N B the possible unavailability of quorum in errors
	// applies only to distributed setup.
	errCount, err := reduceErrs(errs, []error{})
	if err != nil {
		// The dominant error is returned only when a majority of peers
		// reported it; otherwise report lack of read quorum.
		if errCount >= (len(peers)/2 + 1) {
			return nil, err
		}
		return nil, InsufficientReadQuorum{}
	}

	// Group lock information across nodes by (bucket, object)
	// pair. For readability only.
	paramLockMap := make(map[nsParam][]VolumeLockInfo)
	for _, nodeLocks := range allLocks {
		for _, lockInfo := range nodeLocks {
			param := nsParam{
				volume: lockInfo.Bucket,
				path:   lockInfo.Object,
			}
			paramLockMap[param] = append(paramLockMap[param], lockInfo)
		}
	}
	groupedLockInfos := []VolumeLockInfo{}
	for _, volLocks := range paramLockMap {
		groupedLockInfos = append(groupedLockInfos, volLocks...)
	}
	return groupedLockInfos, nil
}
					
						
							| 
									
										
										
										
											2017-01-23 16:32:55 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // reInitPeerDisks - reinitialize disks and object layer on peer servers to use the new format.
 | 
					
						
							|  |  |  | func reInitPeerDisks(peers adminPeers) error { | 
					
						
							|  |  |  | 	errs := make([]error, len(peers)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Send ReInitDisks RPC call to all nodes.
 | 
					
						
							|  |  |  | 	// for local adminPeer this is a no-op.
 | 
					
						
							|  |  |  | 	wg := sync.WaitGroup{} | 
					
						
							|  |  |  | 	for i, peer := range peers { | 
					
						
							|  |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		go func(idx int, peer adminPeer) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							|  |  |  | 			errs[idx] = peer.cmdRunner.ReInitDisks() | 
					
						
							|  |  |  | 		}(i, peer) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 
 | 
					
						
// uptimeSlice - used to sort uptimes in chronological order.
// Implements sort.Interface via the Len/Less/Swap methods below.
type uptimeSlice []struct {
	err    error         // RPC error, set when a peer's uptime could not be fetched.
	uptime time.Duration // reported uptime of the peer server.
}
					
						
							|  |  |  | 
 | 
					
						
// Len returns the number of collected uptime entries (sort.Interface).
func (ts uptimeSlice) Len() int {
	return len(ts)
}
					
						
							|  |  |  | 
 | 
					
						
// Less orders entries by ascending uptime, i.e. most recently
// (re-)started servers first (sort.Interface).
func (ts uptimeSlice) Less(i, j int) bool {
	return ts[i].uptime < ts[j].uptime
}
					
						
							|  |  |  | 
 | 
					
						
// Swap exchanges two entries in place (sort.Interface).
func (ts uptimeSlice) Swap(i, j int) {
	ts[i], ts[j] = ts[j], ts[i]
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // getPeerUptimes - returns the uptime since the last time read quorum
 | 
					
						
							|  |  |  | // was established on success. Otherwise returns errXLReadQuorum.
 | 
					
						
							|  |  |  | func getPeerUptimes(peers adminPeers) (time.Duration, error) { | 
					
						
							| 
									
										
										
										
											2017-02-10 12:38:14 +08:00
										 |  |  | 	// In a single node Erasure or FS backend setup the uptime of
 | 
					
						
							|  |  |  | 	// the setup is the uptime of the single minio server
 | 
					
						
							|  |  |  | 	// instance.
 | 
					
						
							|  |  |  | 	if !globalIsDistXL { | 
					
						
							| 
									
										
										
										
											2017-03-19 02:28:41 +08:00
										 |  |  | 		return UTCNow().Sub(globalBootTime), nil | 
					
						
							| 
									
										
										
										
											2017-02-10 12:38:14 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 	uptimes := make(uptimeSlice, len(peers)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Get up time of all servers.
 | 
					
						
							|  |  |  | 	wg := sync.WaitGroup{} | 
					
						
							|  |  |  | 	for i, peer := range peers { | 
					
						
							|  |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		go func(idx int, peer adminPeer) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							| 
									
										
										
										
											2017-04-21 22:15:53 +08:00
										 |  |  | 			serverInfoData, rpcErr := peer.cmdRunner.ServerInfoData() | 
					
						
							|  |  |  | 			uptimes[idx].uptime, uptimes[idx].err = serverInfoData.Properties.Uptime, rpcErr | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 		}(i, peer) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Sort uptimes in chronological order.
 | 
					
						
							|  |  |  | 	sort.Sort(uptimes) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Pick the readQuorum'th uptime in chronological order. i.e,
 | 
					
						
							|  |  |  | 	// the time at which read quorum was (re-)established.
 | 
					
						
							|  |  |  | 	readQuorum := len(uptimes) / 2 | 
					
						
							|  |  |  | 	validCount := 0 | 
					
						
							|  |  |  | 	latestUptime := time.Duration(0) | 
					
						
							|  |  |  | 	for _, uptime := range uptimes { | 
					
						
							|  |  |  | 		if uptime.err != nil { | 
					
						
							| 
									
										
										
										
											2017-02-10 15:26:44 +08:00
										 |  |  | 			errorIf(uptime.err, "Unable to fetch uptime") | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		validCount++ | 
					
						
							|  |  |  | 		if validCount >= readQuorum { | 
					
						
							|  |  |  | 			latestUptime = uptime.uptime | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-10 12:38:14 +08:00
										 |  |  | 	// Less than readQuorum "Admin.Uptime" RPC call returned
 | 
					
						
							|  |  |  | 	// successfully, so read-quorum unavailable.
 | 
					
						
							|  |  |  | 	if validCount < readQuorum { | 
					
						
							| 
									
										
										
										
											2017-02-08 16:13:02 +08:00
										 |  |  | 		return time.Duration(0), InsufficientReadQuorum{} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return latestUptime, nil | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // getPeerConfig - Fetches config.json from all nodes in the setup and
 | 
					
						
							|  |  |  | // returns the one that occurs in a majority of them.
 | 
					
						
							|  |  |  | func getPeerConfig(peers adminPeers) ([]byte, error) { | 
					
						
							|  |  |  | 	if !globalIsDistXL { | 
					
						
							|  |  |  | 		return peers[0].cmdRunner.GetConfig() | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	errs := make([]error, len(peers)) | 
					
						
							|  |  |  | 	configs := make([][]byte, len(peers)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Get config from all servers.
 | 
					
						
							|  |  |  | 	wg := sync.WaitGroup{} | 
					
						
							|  |  |  | 	for i, peer := range peers { | 
					
						
							|  |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		go func(idx int, peer adminPeer) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							|  |  |  | 			configs[idx], errs[idx] = peer.cmdRunner.GetConfig() | 
					
						
							|  |  |  | 		}(i, peer) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Find the maximally occurring config among peers in a
 | 
					
						
							|  |  |  | 	// distributed setup.
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	serverConfigs := make([]serverConfigV13, len(peers)) | 
					
						
							|  |  |  | 	for i, configBytes := range configs { | 
					
						
							|  |  |  | 		if errs[i] != nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		// Unmarshal the received config files.
 | 
					
						
							|  |  |  | 		err := json.Unmarshal(configBytes, &serverConfigs[i]) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			errorIf(err, "Failed to unmarshal serverConfig from ", peers[i].addr) | 
					
						
							|  |  |  | 			return nil, err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	configJSON, err := getValidServerConfig(serverConfigs, errs) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		errorIf(err, "Unable to find a valid server config") | 
					
						
							|  |  |  | 		return nil, traceError(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Return the config.json that was present quorum or more
 | 
					
						
							|  |  |  | 	// number of disks.
 | 
					
						
							|  |  |  | 	return json.Marshal(configJSON) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // getValidServerConfig - finds the server config that is present in
 | 
					
						
							|  |  |  | // quorum or more number of servers.
 | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (scv serverConfigV13, e error) { | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | 	// majority-based quorum
 | 
					
						
							|  |  |  | 	quorum := len(serverConfigs)/2 + 1 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Count the number of disks a config.json was found in.
 | 
					
						
							|  |  |  | 	configCounter := make([]int, len(serverConfigs)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// We group equal serverConfigs by the lowest index of the
 | 
					
						
							|  |  |  | 	// same value;  e.g, let us take the following serverConfigs
 | 
					
						
							|  |  |  | 	// in a 4-node setup,
 | 
					
						
							|  |  |  | 	// serverConfigs == [c1, c2, c1, c1]
 | 
					
						
							|  |  |  | 	// configCounter == [3, 1, 0, 0]
 | 
					
						
							|  |  |  | 	// c1, c2 are the only distinct values that appear.  c1 is
 | 
					
						
							|  |  |  | 	// identified by 0, the lowest index it appears in and c2 is
 | 
					
						
							|  |  |  | 	// identified by 1. So, we need to find the number of times
 | 
					
						
							|  |  |  | 	// each of these distinct values occur.
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Invariants:
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// 1. At the beginning of the i-th iteration, the number of
 | 
					
						
							|  |  |  | 	// unique configurations seen so far is equal to the number of
 | 
					
						
							|  |  |  | 	// non-zero counter values in config[:i].
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// 2. At the beginning of the i-th iteration, the sum of
 | 
					
						
							|  |  |  | 	// elements of configCounter[:i] is equal to the number of
 | 
					
						
							|  |  |  | 	// non-error configurations seen so far.
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// For each of the serverConfig ...
 | 
					
						
							|  |  |  | 	for i := range serverConfigs { | 
					
						
							|  |  |  | 		// Skip nodes where getConfig failed.
 | 
					
						
							|  |  |  | 		if errs[i] != nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// Check if it is equal to any of the configurations
 | 
					
						
							|  |  |  | 		// seen so far. If j == i is reached then we have an
 | 
					
						
							|  |  |  | 		// unseen configuration.
 | 
					
						
							|  |  |  | 		for j := 0; j <= i; j++ { | 
					
						
							|  |  |  | 			if j < i && configCounter[j] == 0 { | 
					
						
							|  |  |  | 				// serverConfigs[j] is known to be
 | 
					
						
							|  |  |  | 				// equal to a value that was already
 | 
					
						
							|  |  |  | 				// seen. See example above for
 | 
					
						
							|  |  |  | 				// clarity.
 | 
					
						
							|  |  |  | 				continue | 
					
						
							|  |  |  | 			} else if j < i && reflect.DeepEqual(serverConfigs[i], serverConfigs[j]) { | 
					
						
							|  |  |  | 				// serverConfigs[i] is equal to
 | 
					
						
							|  |  |  | 				// serverConfigs[j], update
 | 
					
						
							|  |  |  | 				// serverConfigs[j]'s counter since it
 | 
					
						
							|  |  |  | 				// is the lower index.
 | 
					
						
							|  |  |  | 				configCounter[j]++ | 
					
						
							|  |  |  | 				break | 
					
						
							|  |  |  | 			} else if j == i { | 
					
						
							|  |  |  | 				// serverConfigs[i] is equal to no
 | 
					
						
							|  |  |  | 				// other value seen before. It is
 | 
					
						
							|  |  |  | 				// unique so far.
 | 
					
						
							|  |  |  | 				configCounter[i] = 1 | 
					
						
							|  |  |  | 				break | 
					
						
							|  |  |  | 			} // else invariants specified above are violated.
 | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// We find the maximally occurring server config and check if
 | 
					
						
							|  |  |  | 	// there is quorum.
 | 
					
						
							|  |  |  | 	var configJSON serverConfigV13 | 
					
						
							|  |  |  | 	maxOccurrence := 0 | 
					
						
							|  |  |  | 	for i, count := range configCounter { | 
					
						
							|  |  |  | 		if maxOccurrence < count { | 
					
						
							|  |  |  | 			maxOccurrence = count | 
					
						
							|  |  |  | 			configJSON = serverConfigs[i] | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// If quorum nodes don't agree.
 | 
					
						
							|  |  |  | 	if maxOccurrence < quorum { | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | 		return scv, errXLWriteQuorum | 
					
						
							| 
									
										
										
										
											2017-02-21 04:58:50 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return configJSON, nil | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2017-02-28 03:40:27 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // Write config contents into a temporary file on all nodes.
 | 
					
						
							|  |  |  | func writeTmpConfigPeers(peers adminPeers, tmpFileName string, configBytes []byte) []error { | 
					
						
							|  |  |  | 	// For a single-node minio server setup.
 | 
					
						
							|  |  |  | 	if !globalIsDistXL { | 
					
						
							|  |  |  | 		err := peers[0].cmdRunner.WriteTmpConfig(tmpFileName, configBytes) | 
					
						
							|  |  |  | 		return []error{err} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	errs := make([]error, len(peers)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Write config into temporary file on all nodes.
 | 
					
						
							|  |  |  | 	wg := sync.WaitGroup{} | 
					
						
							|  |  |  | 	for i, peer := range peers { | 
					
						
							|  |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		go func(idx int, peer adminPeer) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							|  |  |  | 			errs[idx] = peer.cmdRunner.WriteTmpConfig(tmpFileName, configBytes) | 
					
						
							|  |  |  | 		}(i, peer) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Return bytes written and errors (if any) during writing
 | 
					
						
							|  |  |  | 	// temporary config file.
 | 
					
						
							|  |  |  | 	return errs | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Move config contents from the given temporary file onto config.json
 | 
					
						
							|  |  |  | // on all nodes.
 | 
					
						
							|  |  |  | func commitConfigPeers(peers adminPeers, tmpFileName string) []error { | 
					
						
							|  |  |  | 	// For a single-node minio server setup.
 | 
					
						
							|  |  |  | 	if !globalIsDistXL { | 
					
						
							|  |  |  | 		return []error{peers[0].cmdRunner.CommitConfig(tmpFileName)} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	errs := make([]error, len(peers)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Rename temporary config file into configDir/config.json on
 | 
					
						
							|  |  |  | 	// all nodes.
 | 
					
						
							|  |  |  | 	wg := sync.WaitGroup{} | 
					
						
							|  |  |  | 	for i, peer := range peers { | 
					
						
							|  |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		go func(idx int, peer adminPeer) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							|  |  |  | 			errs[idx] = peer.cmdRunner.CommitConfig(tmpFileName) | 
					
						
							|  |  |  | 		}(i, peer) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Return errors (if any) received during rename.
 | 
					
						
							|  |  |  | 	return errs | 
					
						
							|  |  |  | } |