/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"fmt"
	"hash/crc32"
	"io"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bpool"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// setsStorageAPI is an encapsulated type for Close().
type setsStorageAPI [][]StorageAPI

func (s setsStorageAPI) Close() error {
	for i := 0; i < len(s); i++ {
		for _, disk := range s[i] {
			if disk == nil {
				continue
			}
			disk.Close()
		}
	}
	return nil
}

// xlSets implements ObjectLayer combining a static list of erasure coded
// object sets. NOTE: There is no dynamic scaling allowed or intended in
// the current design.
type xlSets struct {
	sets []*xlObjects

	// Reference format.
	format *formatXLV3

	// xlDisks mutex to lock xlDisks.
	xlDisksMu sync.RWMutex

	// Re-ordered list of disks per set.
	xlDisks setsStorageAPI

	// List of endpoints provided on the command line.
	endpoints EndpointList

	// Total number of sets and the number of disks per set.
	setCount, drivesPerSet int

	// Done channel to control the monitoring loop.
	disksConnectDoneCh chan struct{}

	// Distribution algorithm of choice.
	distributionAlgo string

	// Pack level listObjects pool management.
	listPool *treeWalkPool
}
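// An illustrative sketch, not part of the original file: the topology these
// fields describe. Assuming, say, 32 endpoints arranged as setCount=2 and
// drivesPerSet=16, xlDisks is addressed as xlDisks[set][drive]:
//
//	// xlDisks[0][0..15] -> first erasure set
//	// xlDisks[1][0..15] -> second erasure set
//	//
//	// An object is hashed to exactly one set (see hashKey below) and is
//	// erasure coded only across that set's 16 drives.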
					
						
// isConnected - checks if the endpoint is connected or not.
func (s *xlSets) isConnected(endpoint Endpoint) bool {
	s.xlDisksMu.RLock()
	defer s.xlDisksMu.RUnlock()

	for i := 0; i < s.setCount; i++ {
		for j := 0; j < s.drivesPerSet; j++ {
			if s.xlDisks[i][j] == nil {
				continue
			}
			var endpointStr string
			if endpoint.IsLocal {
				endpointStr = endpoint.Path
			} else {
				endpointStr = endpoint.String()
			}
			if s.xlDisks[i][j].String() != endpointStr {
				continue
			}
			return s.xlDisks[i][j].IsOnline()
		}
	}
	return false
}

// Initializes a new StorageAPI from the endpoint argument, returns
// StorageAPI and also `format` which exists on the disk.
func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) {
	disk, err := newStorageAPI(endpoint)
	if err != nil {
		return nil, nil, err
	}

	format, err := loadFormatXL(disk)
	if err != nil {
		// Close the internal connection to avoid connection leaks.
		disk.Close()
		return nil, nil, err
	}

	return disk, format, nil
}

// findDiskIndex - returns the i,j'th position of the input `format` against the reference
// format, after successful validation.
func findDiskIndex(refFormat, format *formatXLV3) (int, int, error) {
	if err := formatXLV3Check(refFormat, format); err != nil {
		return 0, 0, err
	}

	if format.XL.This == offlineDiskUUID {
		return -1, -1, fmt.Errorf("diskID: %s is offline", format.XL.This)
	}

	for i := 0; i < len(refFormat.XL.Sets); i++ {
		for j := 0; j < len(refFormat.XL.Sets[0]); j++ {
			if refFormat.XL.Sets[i][j] == format.XL.This {
				return i, j, nil
			}
		}
	}

	return -1, -1, fmt.Errorf("diskID: %s not found", format.XL.This)
}
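// An illustrative sketch with hypothetical UUIDs, not part of the original
// file: how findDiskIndex places a reconnecting disk. If the reference format
// holds Sets = [["uuid-a", "uuid-b"], ["uuid-c", "uuid-d"]] and the connecting
// disk reports format.XL.This == "uuid-c", then:
//
//	i, j, err := findDiskIndex(refFormat, format) // i == 1, j == 0, err == nil
//	s.xlDisks[i][j] = disk                        // slot it into set 1, drive 0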
					
						
// Re-initializes all disks based on the reference format; this function is
// only used by HealFormat and ReloadFormat calls.
func (s *xlSets) reInitDisks(refFormat *formatXLV3, storageDisks []StorageAPI, formats []*formatXLV3) [][]StorageAPI {
	xlDisks := make([][]StorageAPI, s.setCount)
	for i := 0; i < len(refFormat.XL.Sets); i++ {
		xlDisks[i] = make([]StorageAPI, s.drivesPerSet)
	}
	for k := range storageDisks {
		if storageDisks[k] == nil || formats[k] == nil {
			continue
		}
		i, j, err := findDiskIndex(refFormat, formats[k])
		if err != nil {
			reqInfo := (&logger.ReqInfo{}).AppendTags("storageDisk", storageDisks[k].String())
			ctx := logger.SetReqInfo(context.Background(), reqInfo)
			logger.LogIf(ctx, err)
			continue
		}
		xlDisks[i][j] = storageDisks[k]
	}
	return xlDisks
}

// connectDisksWithQuorum is the same as connectDisks but waits until a
// quorum number of formatted disks is online across the sets.
func (s *xlSets) connectDisksWithQuorum() {
	var onlineDisks int
	for onlineDisks < len(s.endpoints)/2 {
		for _, endpoint := range s.endpoints {
			if s.isConnected(endpoint) {
				continue
			}
			disk, format, err := connectEndpoint(endpoint)
			if err != nil {
				printEndpointError(endpoint, err)
				continue
			}
			i, j, err := findDiskIndex(s.format, format)
			if err != nil {
				// Close the internal connection to avoid connection leaks.
				disk.Close()
				printEndpointError(endpoint, err)
				continue
			}
			s.xlDisks[i][j] = disk
			onlineDisks++
		}
		// Sleep for a while - so that we don't go into
		// 100% CPU while fewer than half the disks are online.
		time.Sleep(500 * time.Millisecond)
	}
}
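// An illustrative sketch, not part of the original file: the quorum arithmetic
// above. Assuming 16 endpoints, the loop exits only once at least
// 16/2 == 8 formatted disks are online - enough to read `format.json` in
// quorum - and until then it retries with a 500ms back-off:
//
//	for onlineDisks < len(s.endpoints)/2 { // e.g. keep looping while fewer than 8
//		// ...attempt to connect every endpoint, then sleep...
//	}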
					
						
// connectDisks - attempts to connect all the endpoints, loads their format
// and re-arranges the disks into their proper positions.
func (s *xlSets) connectDisks() {
	for _, endpoint := range s.endpoints {
		if s.isConnected(endpoint) {
			continue
		}
		disk, format, err := connectEndpoint(endpoint)
		if err != nil {
			printEndpointError(endpoint, err)
			continue
		}
		i, j, err := findDiskIndex(s.format, format)
		if err != nil {
			// Close the internal connection to avoid connection leaks.
			disk.Close()
			printEndpointError(endpoint, err)
			continue
		}
		s.xlDisksMu.Lock()
		s.xlDisks[i][j] = disk
		s.xlDisksMu.Unlock()
	}
}

// monitorAndConnectEndpoints is a monitoring loop that keeps track of disconnected
// endpoints by reconnecting them and making sure to place them into the right
// position in the set topology; this monitoring happens at a given monitoring interval.
func (s *xlSets) monitorAndConnectEndpoints(monitorInterval time.Duration) {
	ticker := time.NewTicker(monitorInterval)
	// Stop the timer.
	defer ticker.Stop()

	for {
		select {
		case <-GlobalServiceDoneCh:
			return
		case <-s.disksConnectDoneCh:
			return
		case <-ticker.C:
			s.connectDisks()
		}
	}
}

// GetDisks returns a closure for a given set, which provides the list of disks per set.
func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI {
	return func() []StorageAPI {
		s.xlDisksMu.Lock()
		defer s.xlDisksMu.Unlock()
		disks := make([]StorageAPI, s.drivesPerSet)
		copy(disks, s.xlDisks[setIndex])
		return disks
	}
}
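// An illustrative sketch, not part of the original file: why GetDisks hands out
// a closure rather than a slice. Each xlObjects captures the closure once, and
// every call re-reads s.xlDisks under the mutex, so disks swapped in later by
// connectDisks() become visible without re-wiring the set:
//
//	getDisks := s.GetDisks(0)
//	disks := getDisks() // fresh copy taken under xlDisksMu
//	// A later reconnect replaces s.xlDisks[0][3]; the next getDisks()
//	// call returns the updated disk automatically.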
					
						
const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs.

// Initialize a new set of erasure coded sets.
func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesPerSet int) (ObjectLayer, error) {
	// Initialize the XL sets instance.
	s := &xlSets{
		sets:               make([]*xlObjects, setCount),
		xlDisks:            make([][]StorageAPI, setCount),
		endpoints:          endpoints,
		setCount:           setCount,
		drivesPerSet:       drivesPerSet,
		format:             format,
		disksConnectDoneCh: make(chan struct{}),
		distributionAlgo:   format.XL.DistributionAlgo,
		listPool:           newTreeWalkPool(globalLookupTimeout),
	}

	mutex := newNSLock(globalIsDistXL)

	// Initialize the byte pool once for all sets; the bpool size is set to
	// setCount * drivesPerSet, with each buffer sized up to blockSizeV1.
	bp := bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2)

	for i := 0; i < len(format.XL.Sets); i++ {
		s.xlDisks[i] = make([]StorageAPI, drivesPerSet)

		// Initialize xl objects for a given set.
		s.sets[i] = &xlObjects{
			getDisks: s.GetDisks(i),
			nsMutex:  mutex,
			bp:       bp,
		}
		go s.sets[i].cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
	}

	// Connect disks right away, but wait until we have `format.json` quorum.
	s.connectDisksWithQuorum()

	// Start the disk monitoring and connect routine.
	go s.monitorAndConnectEndpoints(defaultMonitorConnectEndpointInterval)

	return s, nil
}
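// An illustrative sketch with hypothetical sizes, not part of the original
// file: constructing the layer once the reference `format.json` is known:
//
//	// 32 endpoints arranged as 2 erasure sets of 16 drives each.
//	objLayer, err := newXLSets(endpoints, refFormat, 2, 16)
//	if err != nil {
//		return err
//	}
//	// objLayer now reconnects disks in the background and can serve
//	// ObjectLayer calls such as objLayer.StorageInfo(ctx).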
					
						
// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
	var storageInfo StorageInfo
	storageInfo.Backend.Type = BackendErasure
	for _, set := range s.sets {
		lstorageInfo := set.StorageInfo(ctx)
		storageInfo.Used = storageInfo.Used + lstorageInfo.Used
		storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks + lstorageInfo.Backend.OnlineDisks
		storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks + lstorageInfo.Backend.OfflineDisks
	}

	scData, scParity := getRedundancyCount(standardStorageClass, s.drivesPerSet)
	storageInfo.Backend.StandardSCData = scData
	storageInfo.Backend.StandardSCParity = scParity

	rrSCData, rrSCparity := getRedundancyCount(reducedRedundancyStorageClass, s.drivesPerSet)
	storageInfo.Backend.RRSCData = rrSCData
	storageInfo.Backend.RRSCParity = rrSCparity

	storageInfo.Backend.Sets = make([][]madmin.DriveInfo, s.setCount)
	for i := range storageInfo.Backend.Sets {
		storageInfo.Backend.Sets[i] = make([]madmin.DriveInfo, s.drivesPerSet)
	}

	storageDisks, err := initStorageDisks(s.endpoints)
	if err != nil {
		return storageInfo
	}
	defer closeStorageDisks(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks)

	drivesInfo := formatsToDrivesInfo(s.endpoints, formats, sErrs)
	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		// Ignore errors here, since this call cannot do anything at
		// this point. Too many disks are down already.
		return storageInfo
	}

	// Fill in all the available/online endpoints.
	for _, drive := range drivesInfo {
		if drive.UUID == "" {
			continue
		}
		for i := range refFormat.XL.Sets {
			for j, driveUUID := range refFormat.XL.Sets[i] {
				if driveUUID == drive.UUID {
					storageInfo.Backend.Sets[i][j] = drive
				}
			}
		}
	}

	// Fill in all the offline, missing endpoints as well.
	for _, drive := range drivesInfo {
		if drive.UUID == "" {
			for i := range storageInfo.Backend.Sets {
				for j := range storageInfo.Backend.Sets[i] {
					if storageInfo.Backend.Sets[i][j].Endpoint == drive.Endpoint {
						continue
					}
					if storageInfo.Backend.Sets[i][j].Endpoint == "" {
						storageInfo.Backend.Sets[i][j] = drive
						break
					}
				}
			}
		}
	}

	return storageInfo
}

// Shutdown shuts down all erasure coded sets in parallel and
// returns an error upon the first error.
func (s *xlSets) Shutdown(ctx context.Context) error {
	g := errgroup.WithNErrs(len(s.sets))

	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].Shutdown(ctx)
		}, index)
	}

	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}

	return nil
}
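// An illustrative sketch, not part of the original file: the errgroup fan-out
// pattern that Shutdown (and most multi-set calls below) relies on. One
// goroutine per set, errors collected by index; the `index := index` shadow
// pins the loop variable for each closure:
//
//	g := errgroup.WithNErrs(len(s.sets))
//	for index := range s.sets {
//		index := index // capture the per-iteration value
//		g.Go(func() error { return s.sets[index].Shutdown(ctx) }, index)
//	}
//	errs := g.Wait() // errs[i] holds set i's error, or nil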
					
						
// MakeBucketWithLocation - creates a new bucket across all sets simultaneously;
// if any one of the sets fails to create the bucket, we proceed to undo the
// successful operations.
func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Create buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].MakeBucketWithLocation(ctx, bucket, location)
		}, index)
	}

	errs := g.Wait()
	// Upon even a single write quorum error we undo all previously created buckets.
	for _, err := range errs {
		if err != nil {
			if _, ok := err.(InsufficientWriteQuorum); ok {
				undoMakeBucketSets(bucket, s.sets, errs)
			}
			return err
		}
	}

	// Success.
	return nil
}

// This function is used to undo a successful MakeBucket operation.
func undoMakeBucketSets(bucket string, sets []*xlObjects, errs []error) {
	g := errgroup.WithNErrs(len(sets))

	// Undo previous make bucket entry on all underlying sets.
	for index := range sets {
		index := index
		if errs[index] == nil {
			g.Go(func() error {
				return sets[index].DeleteBucket(context.Background(), bucket)
			}, index)
		}
	}

	// Wait for all bucket deletes to finish.
	g.Wait()
}
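// An illustrative sketch with hypothetical errors, not part of the original
// file: how the errs slice drives the rollback. Only the indices that
// succeeded (a nil error) are asked to delete the half-created bucket:
//
//	errs := []error{nil, InsufficientWriteQuorum{}, nil}
//	undoMakeBucketSets("mybucket", s.sets, errs)
//	// DeleteBucket runs on sets 0 and 2 only; set 1 never created it.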
					
						
// hashes the key, returning an integer based on the input algorithm.
// This function currently supports:
// - CRCMOD
// - all new algorithms.
func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
	return int(keyCrc % uint32(cardinality))
}

func hashKey(algo string, key string, cardinality int) int {
	switch algo {
	case formatXLVersionV2DistributionAlgo:
		return crcHashMod(key, cardinality)
	default:
		// An unknown algorithm returns -1, as does a non-positive cardinality.
		return -1
	}
}
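// An illustrative sketch with a hypothetical key, not part of the original
// file: CRCMOD end to end. The IEEE CRC32 of the object name, modulo the set
// count, picks the set, so placement is stateless and reproducible on every
// node:
//
//	keyCrc := crc32.Checksum([]byte("photo.jpg"), crc32.IEEETable)
//	setIdx := int(keyCrc % uint32(len(s.sets))) // same key -> same set, always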
					
						
// Always returns the same erasure coded set for a given input.
func (s *xlSets) getHashedSet(input string) (set *xlObjects) {
	return s.sets[hashKey(s.distributionAlgo, input, len(s.sets))]
}

// GetBucketInfo - returns bucket info from one of the erasure coded sets.
func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
	return s.getHashedSet(bucket).GetBucketInfo(ctx, bucket)
}

// ListObjectsV2 lists all objects in a bucket filtered by prefix.
func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}

	loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return result, err
	}

	listObjectsV2Info := ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}
	return listObjectsV2Info, err
}
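// An illustrative sketch, not part of the original file: the V2-to-V1
// parameter mapping used above. continuationToken (or startAfter on the first
// page) becomes the V1 marker, and loi.NextMarker is echoed back as the next
// continuation token:
//
//	page1, _ := s.ListObjectsV2(ctx, "bucket", "logs/", "", "/", 100, false, "")
//	token := page1.NextContinuationToken // feed into the follow-up call
//	page2, _ := s.ListObjectsV2(ctx, "bucket", "logs/", token, "/", 100, false, "")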
					
						
// SetBucketPolicy persists the new policy on the bucket.
func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error {
	return savePolicyConfig(ctx, s, bucket, policy)
}

// GetBucketPolicy will return the policy on a bucket.
func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
	return getPolicyConfig(s, bucket)
}

// DeleteBucketPolicy deletes all policies on a bucket.
func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	return removePolicyConfig(ctx, s, bucket)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (s *xlSets) IsNotificationSupported() bool {
	return s.getHashedSet("").IsNotificationSupported()
}

// IsListenBucketSupported returns whether listen bucket notification is applicable for this layer.
func (s *xlSets) IsListenBucketSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (s *xlSets) IsEncryptionSupported() bool {
	return s.getHashedSet("").IsEncryptionSupported()
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (s *xlSets) IsCompressionSupported() bool {
	return s.getHashedSet("").IsCompressionSupported()
}

// DeleteBucket - deletes a bucket on all sets simultaneously;
// even if one of the sets fails to delete the bucket, we proceed to
// undo the successful operations.
func (s *xlSets) DeleteBucket(ctx context.Context, bucket string) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Delete buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].DeleteBucket(ctx, bucket)
		}, index)
	}

	errs := g.Wait()
	// For any write quorum failure, we undo all the delete bucket operations
	// by creating all the buckets again.
	for _, err := range errs {
		if err != nil {
			if _, ok := err.(InsufficientWriteQuorum); ok {
				undoDeleteBucketSets(bucket, s.sets, errs)
			}
			return err
		}
	}

	// Delete all bucket metadata.
	deleteBucketMetadata(ctx, bucket, s)

	// Success.
	return nil
}

// This function is used to undo a successful DeleteBucket operation.
func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) {
	g := errgroup.WithNErrs(len(sets))

	// Undo previous delete bucket on all underlying sets.
	for index := range sets {
		index := index
		if errs[index] == nil {
			g.Go(func() error {
				return sets[index].MakeBucketWithLocation(context.Background(), bucket, "")
			}, index)
		}
	}

	g.Wait()
}

// List all buckets from one of the sets; we are not doing a merge
// sort here, just for simplification. As per the design it is assumed
// that all buckets are present on all sets.
func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	// Always list from the same set signified by the empty string.
	return s.getHashedSet("").ListBuckets(ctx)
}

// --- Object Operations ---

// GetObjectNInfo - returns object info and a locked object ReadCloser.
func (s *xlSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	return s.getHashedSet(object).GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}

// GetObject - reads an object from the hashedSet based on the object name.
func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
	return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}

// PutObject - writes an object to the hashedSet based on the object name.
func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, metadata, opts)
}

// GetObjectInfo - reads object metadata from the hashedSet based on the object name.
func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object, opts)
}

// DeleteObject - deletes an object from the hashedSet based on the object name.
func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string) (err error) {
	return s.getHashedSet(object).DeleteObject(ctx, bucket, object)
}

// CopyObject - copies objects from one hashedSet to another hashedSet, on the server side.
func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	srcSet := s.getHashedSet(srcObject)
	destSet := s.getHashedSet(destObject)

	// Check if this request is only a metadata update.
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(destBucket, destObject))
	if cpSrcDstSame && srcInfo.metadataOnly {
		return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
	}

	if !cpSrcDstSame {
		objectDWLock := destSet.nsMutex.NewNSLock(destBucket, destObject)
		if err := objectDWLock.GetLock(globalObjectTimeout); err != nil {
			return objInfo, err
		}
		defer objectDWLock.Unlock()
	}
	return destSet.putObject(ctx, destBucket, destObject, srcInfo.PutObjReader, srcInfo.UserDefined, dstOpts)
}
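// An illustrative sketch, not part of the original file: why CopyObject needs
// two sets at all. Source and destination names hash independently, so the
// data may have to move between erasure sets; only the same-name,
// metadata-only case stays within the source set and skips the destination
// write lock:
//
//	srcSet := s.getHashedSet("a.txt")  // may be a different set from...
//	destSet := s.getHashedSet("b.txt") // ...this one; bytes then stream across sets.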
					
						
// Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by the listDir function to check if an entry is a leaf or non-leaf entry.
// disks - used for doing disk.ListDir(). Sets pass their set of disks.
func listDirSetsFactory(ctx context.Context, isLeaf isLeafFunc, isLeafDir isLeafDirFunc, sets ...[]StorageAPI) listDirFunc {
	listDirInternal := func(bucket, prefixDir, prefixEntry string, disks []StorageAPI) (mergedEntries []string) {
		for _, disk := range disks {
			if disk == nil {
				continue
			}

			var entries []string
			var newEntries []string
			var err error
			entries, err = disk.ListDir(bucket, prefixDir, -1)
			if err != nil {
				continue
			}

			// Find elements in entries which are not in mergedEntries
			for _, entry := range entries {
				idx := sort.SearchStrings(mergedEntries, entry)
				// if entry is already present in mergedEntries don't add.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the entries and sort them.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}
		return mergedEntries
	}

	// listDir - lists all the entries at a given prefix and given entry in the prefix.
	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool) {
		for _, disks := range sets {
			entries := listDirInternal(bucket, prefixDir, prefixEntry, disks)
			var newEntries []string
			// Find elements in entries which are not in mergedEntries
			for _, entry := range entries {
				idx := sort.SearchStrings(mergedEntries, entry)
				// if entry is already present in mergedEntries don't add.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the entries and sort them.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}
		mergedEntries, delayIsLeaf = filterListEntries(bucket, prefixDir, mergedEntries, prefixEntry, isLeaf)
		return mergedEntries, delayIsLeaf
	}
	return listDir
}
					
						
							|  |  |  | 
 | 
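
// Illustrative sketch (not part of the original code; inputs are hypothetical):
// the per-disk merge above is a sorted union with deduplication. Given two
// disks returning overlapping listings:
//
//	diskA := []string{"a/", "b.txt", "c.txt"}
//	diskB := []string{"b.txt", "d.txt"}
//
//	var merged []string
//	for _, entries := range [][]string{diskA, diskB} {
//		var fresh []string
//		for _, entry := range entries {
//			idx := sort.SearchStrings(merged, entry)
//			if idx < len(merged) && merged[idx] == entry {
//				continue // duplicate across disks, skip it
//			}
//			fresh = append(fresh, entry)
//		}
//		merged = append(merged, fresh...)
//		sort.Strings(merged)
//	}
//	// merged == ["a/", "b.txt", "c.txt", "d.txt"]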
					
						
// ListObjects - implements listing of objects across sets; each set is independently
// listed and the results are then merged, lexically sorted, inside listDirSetsFactory().
// Values received through the walk channel arrive properly lexically sorted.
func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	var result ListObjectsInfo
	// Validate all the inputs for listObjects.
	if err := checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
		return result, err
	}

	var objInfos []ObjectInfo
	var eof bool
	var nextMarker string

	recursive := true
	if delimiter == slashSeparator {
		recursive = false
	}

	walkResultCh, endWalkCh := s.listPool.Release(listParams{bucket, recursive, marker, prefix, false})
	if walkResultCh == nil {
		endWalkCh = make(chan struct{})
		isLeaf := func(bucket, entry string) bool {
			entry = strings.TrimSuffix(entry, slashSeparator)
			// Verify if we are at the leaf, a leaf is where we
			// see `xl.json` inside a directory.
			return s.getHashedSet(entry).isObject(bucket, entry)
		}

		isLeafDir := func(bucket, entry string) bool {
			// Verify prefixes in all sets.
			var ok bool
			for _, set := range s.sets {
				ok = set.isObjectDir(bucket, entry)
				if ok {
					return true
				}
			}
			return false
		}

		// Zero length with full capacity, so that append below does not
		// leave nil entries at the front of the slice.
		var setDisks = make([][]StorageAPI, 0, len(s.sets))
		for _, set := range s.sets {
			setDisks = append(setDisks, set.getLoadBalancedDisks())
		}

		listDir := listDirSetsFactory(ctx, isLeaf, isLeafDir, setDisks...)
		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
	}

	for i := 0; i < maxKeys; {
		walkResult, ok := <-walkResultCh
		if !ok {
			// Closed channel.
			eof = true
			break
		}

		// For any walk error return right away.
		if walkResult.err != nil {
			return result, toObjectErr(walkResult.err, bucket, prefix)
		}

		var objInfo ObjectInfo
		var err error
		if hasSuffix(walkResult.entry, slashSeparator) {
			// Verify prefixes in all sets.
			for _, set := range s.sets {
				objInfo, err = set.getObjectInfoDir(ctx, bucket, walkResult.entry)
				if err == nil {
					break
				}
			}
		} else {
			objInfo, err = s.getHashedSet(walkResult.entry).getObjectInfo(ctx, bucket, walkResult.entry)
		}
		if err != nil {
			// Ignore errFileNotFound as the object might have been
			// deleted in the interim between listing and getObjectInfo();
			// ignore quorum error as it might be an entry from an outdated disk.
			if IsErrIgnored(err, []error{
				errFileNotFound,
				errXLReadQuorum,
			}...) {
				continue
			}
			return result, toObjectErr(err, bucket, prefix)
		}
		nextMarker = objInfo.Name
		objInfos = append(objInfos, objInfo)
		i++
		if walkResult.end {
			eof = true
			break
		}
	}

	params := listParams{bucket, recursive, nextMarker, prefix, false}
	if !eof {
		s.listPool.Set(params, walkResultCh, endWalkCh)
	}

	result = ListObjectsInfo{IsTruncated: !eof}
	for _, objInfo := range objInfos {
		result.NextMarker = objInfo.Name
		if objInfo.IsDir && delimiter == slashSeparator {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}
	return result, nil
}
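
// Usage sketch (illustrative only; bucket and prefix are hypothetical): with a
// delimiter the walk lists one "directory" level at a time; carrying NextMarker
// forward pages through a truncated listing.
//
//	marker := ""
//	for {
//		res, err := s.ListObjects(ctx, "mybucket", "photos/", marker, slashSeparator, 1000)
//		if err != nil {
//			return err
//		}
//		// res.Prefixes holds sub-directories, res.Objects holds leaf objects.
//		if !res.IsTruncated {
//			break
//		}
//		marker = res.NextMarker
//	}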
					
						
func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	// In list multipart uploads we are going to treat the input prefix as
	// the object, which means that we are not supporting directory navigation.
	return s.getHashedSet(prefix).ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, metadata, opts)
}

// Copies a part of an object from source hashedSet to destination hashedSet.
func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
	destSet := s.getHashedSet(destObject)

	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
	return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	return s.getHashedSet(object).AbortMultipartUpload(ctx, bucket, object, uploadID)
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}
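
// Illustrative note: every multipart entry point above routes through
// getHashedSet(object), so all operations for one object land on the same
// erasure set. A minimal sketch of such name-based routing, assuming a
// crc32 hash modulo the set count (the real hash is whatever getHashedSet
// implements; pickSet is a hypothetical helper):
//
//	func pickSet(object string, numSets int) int {
//		return int(crc32.ChecksumIEEE([]byte(object))) % numSets
//	}
//
// Because pickSet("photos/a.jpg", 4) is deterministic, NewMultipartUpload,
// PutObjectPart and CompleteMultipartUpload all agree on the destination set.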
					
						
/*

All disks online
-----------------
- All unformatted - format all and return success.
- Some unformatted - format all and return success.
- Any JBOD inconsistent - return failure.
- Some are corrupt (missing format.json) - return failure.
- Any unrecognized disks - return failure.

Some disks are offline and we have quorum.
-----------------
- Some unformatted - format all and return success,
  treat offline disks as corrupted.
- Any JBOD inconsistent - return failure.
- Some are corrupt (missing format.json) - return failure.
- Any unrecognized disks - return failure.

No read quorum
-----------------
failure for all cases.

// Pseudo code for managing `format.json`.

// Generic checks.
if (no quorum) return error
if (any disk is corrupt) return error // Always error.
if (jbod inconsistent) return error // Always error.
if (disks not recognized) return error // Always error.

// Specific checks.
if (all disks online)
  if (all disks return format.json)
     if (jbod consistent)
        if (all disks recognized)
          return
  else
     if (all disks return format.json not found)
        return error
     else (some disks return format.json not found)
        (heal format)
        return
     fi
   fi
else
   if (some disks return format.json not found)
        // Offline disks are marked as dead.
        (heal format) // Offline disks should be marked as dead.
        return success
   fi
fi
*/
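
// A minimal Go sketch of the decision flow above (hasQuorum is a hypothetical
// helper; the real logic lives in HealFormat below and the format-v3 helpers):
//
//	func decideFormatAction(errs []error) error {
//		if !hasQuorum(errs) {
//			return errXLReadQuorum // no read quorum: fail all cases
//		}
//		for _, err := range errs {
//			if err == errCorruptedFormat {
//				return err // corrupt format.json always fails
//			}
//		}
//		// Remaining disks agree on format.json; unformatted ones
//		// (errUnformattedDisk) are candidates for healing.
//		return nil
//	}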
					
						
func formatsToDrivesInfo(endpoints EndpointList, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) {
	// Existing formats are available (i.e. ok), so save them in the
	// result; also populate disks to be healed.
	for i, format := range formats {
		drive := endpoints.GetString(i)
		switch {
		case format != nil:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     format.XL.This,
				Endpoint: drive,
				State:    madmin.DriveStateOk,
			})
		case sErrs[i] == errUnformattedDisk:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    madmin.DriveStateMissing,
			})
		case sErrs[i] == errCorruptedFormat:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    madmin.DriveStateCorrupt,
			})
		default:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    madmin.DriveStateOffline,
			})
		}
	}

	return beforeDrives
}
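
// Illustrative mapping (hypothetical inputs): given three endpoints where the
// first loaded a format, the second is unformatted and the third failed some
// other way, formatsToDrivesInfo reports:
//
//	formats: [&formatXLV3{...}, nil, nil]
//	sErrs:   [nil, errUnformattedDisk, errDiskNotFound]
//	states:  [DriveStateOk, DriveStateMissing, DriveStateOffline]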
					
						
// Reloads the format from the disk, usually called by a remote peer notifier while
// healing in a distributed setup.
func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
	// Acquire lock on format.json
	formatLock := s.getHashedSet(formatConfigFile).nsMutex.NewNSLock(minioMetaBucket, formatConfigFile)
	if err = formatLock.GetRLock(globalHealingTimeout); err != nil {
		return err
	}
	defer formatLock.RUnlock()

	storageDisks, err := initStorageDisks(s.endpoints)
	if err != nil {
		return err
	}
	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks)
	if err = checkFormatXLValues(formats); err != nil {
		return err
	}

	for index, sErr := range sErrs {
		if sErr != nil {
			// Look for acceptable heal errors; for any other
			// errors we should simply quit and return.
			if _, ok := formatHealErrors[sErr]; !ok {
				return fmt.Errorf("Disk %s: %s", s.endpoints[index], sErr)
			}
		}
	}

	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		return err
	}

	// Kill the monitoring loop such that we stop writing,
	// to indicate that we will re-initialize everything
	// with the new format.
	s.disksConnectDoneCh <- struct{}{}

	// Replace with the new format.
	s.format = refFormat

	s.xlDisksMu.Lock()
	{
		// Close all existing disks.
		s.xlDisks.Close()

		// Re-initialize disks, after saving the new reference format.
		s.xlDisks = s.reInitDisks(refFormat, storageDisks, formats)
	}
	s.xlDisksMu.Unlock()

	// Restart monitoring loop to monitor reformatted disks again.
	go s.monitorAndConnectEndpoints(defaultMonitorConnectEndpointInterval)

	return nil
}

// HealFormat - heals missing `format.json` on fresh unformatted disks.
// TODO: In future, support corrupted disks missing format.json but
// having erasure coded data in them.
func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
	// Acquire lock on format.json
	formatLock := s.getHashedSet(formatConfigFile).nsMutex.NewNSLock(minioMetaBucket, formatConfigFile)
	if err = formatLock.GetLock(globalHealingTimeout); err != nil {
		return madmin.HealResultItem{}, err
	}
	defer formatLock.Unlock()

	storageDisks, err := initStorageDisks(s.endpoints)
	if err != nil {
		return madmin.HealResultItem{}, err
	}

	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks)
	if err = checkFormatXLValues(formats); err != nil {
		return madmin.HealResultItem{}, err
	}

	// Prepare heal-result
	res = madmin.HealResultItem{
		Type:      madmin.HealItemMetadata,
		Detail:    "disk-format",
		DiskCount: s.setCount * s.drivesPerSet,
		SetCount:  s.setCount,
	}

	// Fetch all the drive info status.
	beforeDrives := formatsToDrivesInfo(s.endpoints, formats, sErrs)

	res.After.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
	res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
	// Copy the "after" drive state too from "before".
	for k, v := range beforeDrives {
		res.Before.Drives[k] = madmin.HealDriveInfo{
			UUID:     v.UUID,
			Endpoint: v.Endpoint,
			State:    v.State,
		}
		res.After.Drives[k] = madmin.HealDriveInfo{
			UUID:     v.UUID,
			Endpoint: v.Endpoint,
			State:    v.State,
		}
	}

	for index, sErr := range sErrs {
		if sErr != nil {
			// Look for acceptable heal errors; for any other
			// errors we should simply quit and return.
			if _, ok := formatHealErrors[sErr]; !ok {
				return res, fmt.Errorf("Disk %s: %s", s.endpoints[index], sErr)
			}
		}
	}

	if !hasAnyErrorsUnformatted(sErrs) {
		// No unformatted disks found; disks are either offline
		// or online, no healing is required.
		return res, errNoHealRequired
	}

	// All disks are unformatted, return quorum error.
	if shouldInitXLDisks(sErrs) {
		return res, errXLReadQuorum
	}

	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		return res, err
	}

	// Mark all UUIDs which might be offline, using the list
	// of formats to mark them appropriately.
	markUUIDsOffline(refFormat, formats)

	// Initialize a new set of set formats which will be written to disk.
	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.drivesPerSet, formats, sErrs)

	// Look for all offline/unformatted disks in our reference format,
	// such that we can fill them up with new UUIDs. This looping also
	// ensures that the replaced disks are allocated evenly across all
	// sets, making sure that redundancy is not lost.
	for i := range refFormat.XL.Sets {
		for j := range refFormat.XL.Sets[i] {
			if refFormat.XL.Sets[i][j] == offlineDiskUUID {
				for l := range newFormatSets[i] {
					if newFormatSets[i][l] == nil {
						continue
					}
					if newFormatSets[i][l].XL.This == "" {
						newFormatSets[i][l].XL.This = mustGetUUID()
						refFormat.XL.Sets[i][j] = newFormatSets[i][l].XL.This
						for m, v := range res.After.Drives {
							if v.Endpoint == s.endpoints.GetString(i*s.drivesPerSet+l) {
								res.After.Drives[m].UUID = newFormatSets[i][l].XL.This
								res.After.Drives[m].State = madmin.DriveStateOk
							}
						}
						break
					}
				}
			}
		}
	}

	if !dryRun {
		var tmpNewFormats = make([]*formatXLV3, s.setCount*s.drivesPerSet)
		for i := range newFormatSets {
			for j := range newFormatSets[i] {
				if newFormatSets[i][j] == nil {
					continue
				}
				tmpNewFormats[i*s.drivesPerSet+j] = newFormatSets[i][j]
				tmpNewFormats[i*s.drivesPerSet+j].XL.Sets = refFormat.XL.Sets
			}
		}

		// Initialize meta volume; if the volume already exists it is
		// ignored, and all disks which are not found are ignored as well.
		if err = initFormatXLMetaVolume(storageDisks, tmpNewFormats); err != nil {
			return madmin.HealResultItem{}, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
		}

		// Save formats `format.json` across all disks.
		if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil {
			return madmin.HealResultItem{}, err
		}

		// Kill the monitoring loop such that we stop writing,
		// to indicate that we will re-initialize everything
		// with the new format.
		s.disksConnectDoneCh <- struct{}{}

		// Replace with the new reference format.
		s.format = refFormat

		s.xlDisksMu.Lock()
		{
			// Disconnect/relinquish all existing disks.
			s.xlDisks.Close()

			// Re-initialize disks, after saving the new reference format.
			s.xlDisks = s.reInitDisks(refFormat, storageDisks, tmpNewFormats)
		}
		s.xlDisksMu.Unlock()

		// Restart our monitoring loop to start monitoring newly formatted disks.
		go s.monitorAndConnectEndpoints(defaultMonitorConnectEndpointInterval)
	}

	return res, nil
}
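
// Usage sketch (illustrative only): HealFormat is typically driven by the
// admin heal API. A dry run reports what would change without writing any
// format.json:
//
//	res, err := s.HealFormat(ctx, true) // dryRun
//	switch err {
//	case errNoHealRequired:
//		// nothing to do, no unformatted disks were found
//	case nil:
//		// res.Before.Drives / res.After.Drives describe the planned
//		// changes; call s.HealFormat(ctx, false) to apply them.
//	}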
					
						
// HealBucket - heals inconsistent buckets and bucket metadata on all sets.
func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun bool) (results []madmin.HealResultItem, err error) {
	bucketLock := globalNSMutex.NewNSLock(bucket, "")
	if err := bucketLock.GetLock(globalHealingTimeout); err != nil {
		return nil, err
	}
	defer bucketLock.Unlock()

	// Initialize heal result info
	res := madmin.HealResultItem{
		Type:      madmin.HealItemBucket,
		Bucket:    bucket,
		DiskCount: s.setCount * s.drivesPerSet,
		SetCount:  s.setCount,
	}

	for _, set := range s.sets {
		var setResults []madmin.HealResultItem
		setResults, _ = set.HealBucket(ctx, bucket, dryRun)
		for _, setResult := range setResults {
			if setResult.Type == madmin.HealItemBucket {
				for _, v := range setResult.Before.Drives {
					res.Before.Drives = append(res.Before.Drives, v)
				}
				for _, v := range setResult.After.Drives {
					res.After.Drives = append(res.After.Drives, v)
				}
				continue
			}
			results = append(results, setResult)
		}
	}

	for _, endpoint := range s.endpoints {
		var foundBefore bool
		for _, v := range res.Before.Drives {
			if endpoint.IsLocal {
				if v.Endpoint == endpoint.Path {
					foundBefore = true
				}
			} else {
				if v.Endpoint == endpoint.String() {
					foundBefore = true
				}
			}
		}
		if !foundBefore {
			res.Before.Drives = append(res.Before.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: endpoint.String(),
				State:    madmin.DriveStateOffline,
			})
		}
		var foundAfter bool
		for _, v := range res.After.Drives {
			if endpoint.IsLocal {
				if v.Endpoint == endpoint.Path {
					foundAfter = true
				}
			} else {
				if v.Endpoint == endpoint.String() {
					foundAfter = true
				}
			}
		}
		if !foundAfter {
			res.After.Drives = append(res.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: endpoint.String(),
				State:    madmin.DriveStateOffline,
			})
		}
	}

	// Check if we had quorum to write; if not return an appropriate error.
	_, afterDriveOnline := res.GetOnlineCounts()
	if afterDriveOnline < ((s.setCount*s.drivesPerSet)/2)+1 {
		return nil, toObjectErr(errXLWriteQuorum, bucket)
	}

	results = append(results, res)

	return results, nil
}
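
// Worked example for the write-quorum check above (hypothetical sizing): with
// setCount = 4 and drivesPerSet = 4, DiskCount is 16, so the heal result must
// show at least (16/2)+1 = 9 drives online afterwards; anything fewer returns
// errXLWriteQuorum for the bucket.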
					
						
// HealObject - heals inconsistent object on a hashedSet based on object name.
func (s *xlSets) HealObject(ctx context.Context, bucket, object string, dryRun bool) (madmin.HealResultItem, error) {
	return s.getHashedSet(object).HealObject(ctx, bucket, object, dryRun)
}

// Lists all buckets which need healing.
func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
	listBuckets := []BucketInfo{}
	var healBuckets = map[string]BucketInfo{}
	for _, set := range s.sets {
		buckets, _, err := listAllBuckets(set.getDisks())
		if err != nil {
			return nil, err
		}
		for _, currBucket := range buckets {
			healBuckets[currBucket.Name] = BucketInfo{
				Name:    currBucket.Name,
				Created: currBucket.Created,
			}
		}
	}
	for _, bucketInfo := range healBuckets {
		listBuckets = append(listBuckets, bucketInfo)
	}
	return listBuckets, nil
}

// Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by listDir function to check if an entry is a leaf or non-leaf entry.
// disks - used for doing disk.ListDir(); each set passes its own set of disks.
func listDirSetsHealFactory(isLeaf isLeafFunc, sets ...[]StorageAPI) listDirFunc {
	listDirInternal := func(bucket, prefixDir, prefixEntry string, disks []StorageAPI) (mergedEntries []string) {
		for _, disk := range disks {
			if disk == nil {
				continue
			}

			var entries []string
			var newEntries []string
			var err error
			entries, err = disk.ListDir(bucket, prefixDir, -1)
			if err != nil {
				continue
			}

			// Filter entries that have the prefix prefixEntry.
			entries = filterMatchingPrefix(entries, prefixEntry)

			// isLeaf() check has to happen here so that
			// trailing "/" for objects can be removed.
			for i, entry := range entries {
				if isLeaf(bucket, pathJoin(prefixDir, entry)) {
					entries[i] = strings.TrimSuffix(entry, slashSeparator)
				}
			}

			// Find elements in entries which are not in mergedEntries
			for _, entry := range entries {
				idx := sort.SearchStrings(mergedEntries, entry)
				// if entry is already present in mergedEntries don't add.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the entries and sort them.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}
		return mergedEntries
	}

	// listDir - lists all the entries at a given prefix and given entry in the prefix.
	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool) {
		for _, disks := range sets {
			entries := listDirInternal(bucket, prefixDir, prefixEntry, disks)

			var newEntries []string
			// Find elements in entries which are not in mergedEntries
			for _, entry := range entries {
				idx := sort.SearchStrings(mergedEntries, entry)
				// if entry is already present in mergedEntries don't add.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the entries and sort them.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}
		// Heal listing never delays the isLeaf check.
		return mergedEntries, false
	}
	return listDir
}

// listObjectsHeal - wrapper function implemented over file tree walk.
func (s *xlSets) listObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
	// Default is recursive; if delimiter is set then list non-recursive.
	recursive := true
	if delimiter == slashSeparator {
		recursive = false
	}

	// "heal" is true for listObjectsHeal() and false for listObjects().
	walkResultCh, endWalkCh := s.listPool.Release(listParams{bucket, recursive, marker, prefix, true})
	if walkResultCh == nil {
		endWalkCh = make(chan struct{})
		isLeaf := func(bucket, entry string) bool {
			entry = strings.TrimSuffix(entry, slashSeparator)
			// Verify if we are at the leaf, a leaf is where we
			// see `xl.json` inside a directory.
			return s.getHashedSet(entry).isObject(bucket, entry)
		}

		isLeafDir := func(bucket, entry string) bool {
			var ok bool
			for _, set := range s.sets {
				ok = set.isObjectDir(bucket, entry)
				if ok {
					return true
				}
			}
			return false
		}

		// Zero length with full capacity, same pattern as in ListObjects.
		var setDisks = make([][]StorageAPI, 0, len(s.sets))
		for _, set := range s.sets {
			setDisks = append(setDisks, set.getLoadBalancedDisks())
		}

		listDir := listDirSetsHealFactory(isLeaf, setDisks...)
		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, nil, isLeafDir, endWalkCh)
	}

	var objInfos []ObjectInfo
	var eof bool
	var nextMarker string
	for i := 0; i < maxKeys; {
		walkResult, ok := <-walkResultCh
		if !ok {
			// Closed channel.
			eof = true
			break
		}
		// For any walk error return right away.
		if walkResult.err != nil {
			return loi, toObjectErr(walkResult.err, bucket, prefix)
		}
		var objInfo ObjectInfo
		objInfo.Bucket = bucket
		objInfo.Name = walkResult.entry
		nextMarker = objInfo.Name
		objInfos = append(objInfos, objInfo)
		i++
		if walkResult.end {
			eof = true
			break
		}
	}

	params := listParams{bucket, recursive, nextMarker, prefix, true}
	if !eof {
					
						
							|  |  |  | 		s.listPool.Set(params, walkResultCh, endWalkCh) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	result := ListObjectsInfo{IsTruncated: !eof} | 
					
						
							|  |  |  | 	for _, objInfo := range objInfos { | 
					
						
							|  |  |  | 		result.NextMarker = objInfo.Name | 
					
						
							| 
									
										
										
										
											2018-05-11 07:53:42 +08:00
										 |  |  | 		result.Objects = append(result.Objects, objInfo) | 
					
						
							| 
									
										
										
										
											2018-02-16 09:45:57 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return result, nil | 
					
						
							|  |  |  | } | 
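
// listObjectsHeal above implements resumable pagination: it drains at most
// maxKeys results from the tree walker and, when the walk is not yet
// complete, parks the open channel in listPool keyed by the next marker so a
// later request can resume the same walk. Below is a hedged sketch of just
// the drain step, using a hypothetical result type in place of the package's
// actual walk result; neither name is used by the code above.
type walkResultSketch struct {
	entry string
	err   error
	end   bool
}

func drainWalkSketch(ch <-chan walkResultSketch, maxKeys int) (entries []string, eof bool, err error) {
	for i := 0; i < maxKeys; i++ {
		r, ok := <-ch
		if !ok {
			// Walker closed the channel: the walk is complete.
			return entries, true, nil
		}
		if r.err != nil {
			// Abort on the first walk error, mirroring the loop above.
			return entries, false, r.err
		}
		entries = append(entries, r.entry)
		if r.end {
			// Walker flagged the final entry.
			return entries, true, nil
		}
	}
	// Page is full; the caller would park the channel for the next page.
	return entries, false, nil
}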
					
						
// ListObjectsHeal - lists all objects which need to be healed.
func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
		return loi, err
	}

	// With max keys of zero we have reached eof, return right here.
	if maxKeys == 0 {
		return loi, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to the S3 spec we stop at the 'delimiter' along
	// with the prefix. On a flat namespace with 'prefix' as '/'
	// we don't have any entries, since all the keys are of form 'keyName/...'
	if delimiter == slashSeparator && prefix == slashSeparator {
		return loi, nil
	}

	// Overflowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	// Initiate a list operation; if successful, filter and return quickly.
	listObjInfo, err := s.listObjectsHeal(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err == nil {
		// We got the entries successfully, return.
		return listObjInfo, nil
	}

	// Return error at the end.
	return loi, toObjectErr(err, bucket, prefix)
}
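
// ListObjectsHeal normalizes maxKeys the way the S3 list APIs do: zero yields
// an empty page, while negative or oversized values are clamped to the
// server-side page limit (maxObjectList in this package). A small
// illustration of that clamping rule; the helper name is hypothetical.
func clampListLimitSketch(maxKeys, limit int) int {
	if maxKeys < 0 || maxKeys > limit {
		return limit
	}
	return maxKeys
}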