| 
									
										
										
										
											2016-04-30 08:52:17 +08:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  |  * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. | 
					
						
							| 
									
										
										
										
											2016-04-30 08:52:17 +08:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Licensed under the Apache License, Version 2.0 (the "License"); | 
					
						
							|  |  |  |  * you may not use this file except in compliance with the License. | 
					
						
							|  |  |  |  * You may obtain a copy of the License at | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Unless required by applicable law or agreed to in writing, software | 
					
						
							|  |  |  |  * distributed under the License is distributed on an "AS IS" BASIS, | 
					
						
							|  |  |  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
					
						
							|  |  |  |  * See the License for the specific language governing permissions and | 
					
						
							|  |  |  |  * limitations under the License. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-19 07:23:42 +08:00
										 |  |  | package cmd | 
					
						
							| 
									
										
										
										
											2016-04-30 08:52:17 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-05-25 16:33:39 +08:00
										 |  |  | import ( | 
					
						
							| 
									
										
										
										
											2018-03-16 04:27:16 +08:00
										 |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2019-07-06 05:06:12 +08:00
										 |  |  | 	"strings" | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 	"sync" | 
					
						
							| 
									
										
										
										
											2019-07-06 05:06:12 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-11-23 10:18:22 +08:00
										 |  |  | 	humanize "github.com/dustin/go-humanize" | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/sync/errgroup" | 
					
						
							| 
									
										
										
										
											2016-05-25 16:33:39 +08:00
										 |  |  | ) | 
					
						
							| 
									
										
										
										
											2016-04-30 08:52:17 +08:00
										 |  |  | 
 | 
					
						
const (
	// Block size used for all internal operations version 1.
	//
	// TLDR: no longer used for new writes — xl.meta captures the right
	// block size, so blockSizeV2 should be used for all future purposes.
	// This value is kept here only to calculate the max API requests
	// based on RAM size for existing (previously written) content.
	blockSizeV1 = 10 * humanize.MiByte

	// Block size used in erasure coding version 2.
	blockSizeV2 = 1 * humanize.MiByte

	// Buckets meta prefix.
	bucketMetaPrefix = "buckets"

	// ETag (hex encoded md5sum) of the empty string.
	emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
)
					
						
							|  |  |  | 
 | 
					
						
// Global object layer mutex, used for safely updating object layer.
var globalObjLayerMutex sync.RWMutex

// Global object layer, only accessed by globalObjectAPI.
var globalObjectAPI ObjectLayer

// Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-21 08:33:01 +08:00
										 |  |  | // Checks if the object is a directory, this logic uses
 | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | // if size == 0 and object ends with SlashSeparator then
 | 
					
						
							| 
									
										
										
										
											2017-01-21 08:33:01 +08:00
										 |  |  | // returns true.
 | 
					
						
							|  |  |  | func isObjectDir(object string, size int64) bool { | 
					
						
							| 
									
										
										
										
											2019-12-06 15:16:06 +08:00
										 |  |  | 	return HasSuffix(object, SlashSeparator) && size == 0 | 
					
						
							| 
									
										
										
										
											2017-01-21 08:33:01 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-10-27 01:29:29 +08:00
										 |  |  | func newStorageAPIWithoutHealthCheck(endpoint Endpoint) (storage StorageAPI, err error) { | 
					
						
							|  |  |  | 	if endpoint.IsLocal { | 
					
						
							|  |  |  | 		storage, err := newXLStorage(endpoint) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return nil, err | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-03-17 11:06:57 +08:00
										 |  |  | 		return newXLStorageDiskIDCheck(storage), nil | 
					
						
							| 
									
										
										
										
											2020-10-27 01:29:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return newStorageRESTClient(endpoint, false), nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 16:49:46 +08:00
										 |  |  | // Depending on the disk type network or local, initialize storage API.
 | 
					
						
							| 
									
										
										
										
											2017-04-12 06:44:27 +08:00
										 |  |  | func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) { | 
					
						
							|  |  |  | 	if endpoint.IsLocal { | 
					
						
							| 
									
										
										
										
											2020-08-26 01:55:15 +08:00
										 |  |  | 		storage, err := newXLStorage(endpoint) | 
					
						
							| 
									
										
										
										
											2019-10-26 01:37:53 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return nil, err | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-03-17 11:06:57 +08:00
										 |  |  | 		return newXLStorageDiskIDCheck(storage), nil | 
					
						
							| 
									
										
										
										
											2016-06-02 16:49:46 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-04-12 06:44:27 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-10-27 01:29:29 +08:00
										 |  |  | 	return newStorageRESTClient(endpoint, true), nil | 
					
						
							| 
									
										
										
										
											2016-06-02 16:49:46 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// listObjectsNonSlash lists objects for a delimiter other than "/" by walking
// the bucket recursively and collapsing entries that share a delimiter-bounded
// prefix into common prefixes. Results are collected until maxKeys entries are
// gathered or the tree walk ends (eof).
func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
	// The walk is always recursive here; collapsing into prefixes is done
	// below on the collected entries, not by the walker itself.
	endWalkCh := make(chan struct{})
	defer close(endWalkCh)
	recursive := true
	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", recursive, listDir, isLeaf, isLeafDir, endWalkCh)

	var objInfos []ObjectInfo
	var eof bool
	// prevPrefix dedupes consecutive entries that collapse to the same
	// delimiter-bounded common prefix.
	var prevPrefix string

	for {
		if len(objInfos) == maxKeys {
			break
		}
		// Channel close means the walker has no more entries — eof.
		result, ok := <-walkResultCh
		if !ok {
			eof = true
			break
		}

		var objInfo ObjectInfo
		var err error

		// Look for the delimiter in the part of the entry after the prefix.
		index := strings.Index(strings.TrimPrefix(result.entry, prefix), delimiter)
		if index == -1 {
			// No delimiter past the prefix: this is a leaf object.
			objInfo, err = getObjInfo(ctx, bucket, result.entry)
			if err != nil {
				// Ignore errFileNotFound as the object might have got
				// deleted in the interim period of listing and getObjectInfo(),
				// ignore quorum error as it might be an entry from an outdated disk.
				if IsErrIgnored(err, []error{
					errFileNotFound,
					errErasureReadQuorum,
				}...) {
					continue
				}
				return loi, toObjectErr(err, bucket, prefix)
			}
		} else {
			// Delimiter found: collapse the entry to its common prefix,
			// which includes the prefix, the segment, and the delimiter.
			index = len(prefix) + index + len(delimiter)
			currPrefix := result.entry[:index]
			if currPrefix == prevPrefix {
				continue
			}
			prevPrefix = currPrefix

			objInfo = ObjectInfo{
				Bucket: bucket,
				Name:   currPrefix,
				IsDir:  true,
			}
		}

		// Listing resumes strictly after the marker.
		if objInfo.Name <= marker {
			continue
		}

		objInfos = append(objInfos, objInfo)
		if result.end {
			eof = true
			break
		}
	}

	// Split collected entries into common prefixes and objects.
	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
		if objInfo.IsDir {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}

	// If we stopped before eof the listing is truncated; the last name
	// collected becomes the continuation marker.
	if !eof {
		result.IsTruncated = true
		if len(objInfos) > 0 {
			result.NextMarker = objInfos[len(objInfos)-1].Name
		}
	}

	return result, nil
}
					
						
							|  |  |  | 
 | 
					
						
// Walk a bucket, optionally prefix recursively, until we have returned
// all the content to objectInfo channel, it is callers responsibility
// to allocate a receive channel for ObjectInfo, upon any unhandled
// error walker returns error. Optionally if context.Done() is received
// then Walk() stops the walker.
func fsWalk(ctx context.Context, obj ObjectLayer, bucket, prefix string, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, results chan<- ObjectInfo, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", obj); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	// Recursive walk; ctx.Done() doubles as the end-walk signal so
	// cancellation stops the tree walker.
	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", true, listDir, isLeaf, isLeafDir, ctx.Done())

	// Drain the walker asynchronously; results channel is closed by this
	// goroutine when the walk ends, so the caller can range over it.
	go func() {
		defer close(results)

		for {
			walkResult, ok := <-walkResultCh
			if !ok {
				// Walker finished or was cancelled.
				break
			}

			var objInfo ObjectInfo
			var err error
			if HasSuffix(walkResult.entry, SlashSeparator) {
				// Directory entry: try each provided dir-info getter until
				// one succeeds.
				for _, getObjectInfoDir := range getObjectInfoDirs {
					objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
					if err == nil {
						break
					}
					if err == errFileNotFound {
						// No metadata object for this directory — synthesize
						// a plain directory entry and keep trying the
						// remaining getters.
						err = nil
						objInfo = ObjectInfo{
							Bucket: bucket,
							Name:   walkResult.entry,
							IsDir:  true,
						}
					}
				}
			} else {
				objInfo, err = getObjInfo(ctx, bucket, walkResult.entry)
			}
			if err != nil {
				// Best-effort walk: skip entries whose info cannot be read.
				continue
			}
			results <- objInfo
			if walkResult.end {
				break
			}
		}
	}()
	return nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-08-26 03:26:48 +08:00
										 |  |  | func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) { | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | 	if delimiter != SlashSeparator && delimiter != "" { | 
					
						
							| 
									
										
										
										
											2020-08-26 03:26:48 +08:00
										 |  |  | 		return listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, isLeaf, isLeafDir, getObjInfo, getObjectInfoDirs...) | 
					
						
							| 
									
										
										
										
											2019-07-06 05:06:12 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-25 23:52:28 +08:00
										 |  |  | 	if err := checkListObjsArgs(ctx, bucket, prefix, marker, obj); err != nil { | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		return loi, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Marker is set validate pre-condition.
 | 
					
						
							|  |  |  | 	if marker != "" { | 
					
						
							|  |  |  | 		// Marker not common with prefix is not implemented. Send an empty response
 | 
					
						
							| 
									
										
										
										
											2019-12-06 15:16:06 +08:00
										 |  |  | 		if !HasPrefix(marker, prefix) { | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 			return loi, nil | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// With max keys of zero we have reached eof, return right here.
 | 
					
						
							|  |  |  | 	if maxKeys == 0 { | 
					
						
							|  |  |  | 		return loi, nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// For delimiter and prefix as '/' we do not list anything at all
 | 
					
						
							|  |  |  | 	// since according to s3 spec we stop at the 'delimiter'
 | 
					
						
							|  |  |  | 	// along // with the prefix. On a flat namespace with 'prefix'
 | 
					
						
							|  |  |  | 	// as '/' we don't have any entries, since all the keys are
 | 
					
						
							|  |  |  | 	// of form 'keyName/...'
 | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | 	if delimiter == SlashSeparator && prefix == SlashSeparator { | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		return loi, nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Over flowing count - reset to maxObjectList.
 | 
					
						
							|  |  |  | 	if maxKeys < 0 || maxKeys > maxObjectList { | 
					
						
							|  |  |  | 		maxKeys = maxObjectList | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Default is recursive, if delimiter is set then list non recursive.
 | 
					
						
							|  |  |  | 	recursive := true | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | 	if delimiter == SlashSeparator { | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		recursive = false | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-01-30 19:50:07 +08:00
										 |  |  | 	walkResultCh, endWalkCh := tpool.Release(listParams{bucket, recursive, marker, prefix}) | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 	if walkResultCh == nil { | 
					
						
							|  |  |  | 		endWalkCh = make(chan struct{}) | 
					
						
							| 
									
										
										
										
											2020-08-26 03:26:48 +08:00
										 |  |  | 		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh) | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	var eof bool | 
					
						
							|  |  |  | 	var nextMarker string | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// List until maxKeys requested.
 | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 	g := errgroup.WithNErrs(maxKeys).WithConcurrency(10) | 
					
						
							|  |  |  | 	ctx, cancel := g.WithCancelOnError(ctx) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	objInfoFound := make([]*ObjectInfo, maxKeys) | 
					
						
							|  |  |  | 	var i int | 
					
						
							|  |  |  | 	for i = 0; i < maxKeys; i++ { | 
					
						
							|  |  |  | 		i := i | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		walkResult, ok := <-walkResultCh | 
					
						
							|  |  |  | 		if !ok { | 
					
						
							|  |  |  | 			// Closed channel.
 | 
					
						
							|  |  |  | 			eof = true | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-12-06 15:16:06 +08:00
										 |  |  | 		if HasSuffix(walkResult.entry, SlashSeparator) { | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 			g.Go(func() error { | 
					
						
							|  |  |  | 				for _, getObjectInfoDir := range getObjectInfoDirs { | 
					
						
							|  |  |  | 					objInfo, err := getObjectInfoDir(ctx, bucket, walkResult.entry) | 
					
						
							|  |  |  | 					if err == nil { | 
					
						
							|  |  |  | 						objInfoFound[i] = &objInfo | 
					
						
							|  |  |  | 						// Done...
 | 
					
						
							|  |  |  | 						return nil | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 					} | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 					// Add temp, may be overridden,
 | 
					
						
							|  |  |  | 					if err == errFileNotFound { | 
					
						
							|  |  |  | 						objInfoFound[i] = &ObjectInfo{ | 
					
						
							|  |  |  | 							Bucket: bucket, | 
					
						
							|  |  |  | 							Name:   walkResult.entry, | 
					
						
							|  |  |  | 							IsDir:  true, | 
					
						
							|  |  |  | 						} | 
					
						
							|  |  |  | 						continue | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					return toObjectErr(err, bucket, prefix) | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 				return nil | 
					
						
							|  |  |  | 			}, i) | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		} else { | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 			g.Go(func() error { | 
					
						
							|  |  |  | 				objInfo, err := getObjInfo(ctx, bucket, walkResult.entry) | 
					
						
							|  |  |  | 				if err != nil { | 
					
						
							|  |  |  | 					// Ignore errFileNotFound as the object might have got
 | 
					
						
							|  |  |  | 					// deleted in the interim period of listing and getObjectInfo(),
 | 
					
						
							|  |  |  | 					// ignore quorum error as it might be an entry from an outdated disk.
 | 
					
						
							|  |  |  | 					if IsErrIgnored(err, []error{ | 
					
						
							|  |  |  | 						errFileNotFound, | 
					
						
							|  |  |  | 						errErasureReadQuorum, | 
					
						
							|  |  |  | 					}...) { | 
					
						
							|  |  |  | 						return nil | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					return toObjectErr(err, bucket, prefix) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 				objInfoFound[i] = &objInfo | 
					
						
							|  |  |  | 				return nil | 
					
						
							|  |  |  | 			}, i) | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 		if walkResult.end { | 
					
						
							|  |  |  | 			eof = true | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-03-09 03:30:43 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	if err := g.WaitErr(); err != nil { | 
					
						
							|  |  |  | 		return loi, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// Copy found objects
 | 
					
						
							|  |  |  | 	objInfos := make([]ObjectInfo, 0, i+1) | 
					
						
							|  |  |  | 	for _, objInfo := range objInfoFound { | 
					
						
							|  |  |  | 		if objInfo == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		objInfos = append(objInfos, *objInfo) | 
					
						
							|  |  |  | 		nextMarker = objInfo.Name | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Save list routine for the next marker if we haven't reached EOF.
 | 
					
						
							| 
									
										
										
										
											2020-01-30 19:50:07 +08:00
										 |  |  | 	params := listParams{bucket, recursive, nextMarker, prefix} | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 	if !eof { | 
					
						
							|  |  |  | 		tpool.Set(params, walkResultCh, endWalkCh) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	result := ListObjectsInfo{} | 
					
						
							|  |  |  | 	for _, objInfo := range objInfos { | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | 		if objInfo.IsDir && delimiter == SlashSeparator { | 
					
						
							| 
									
										
										
										
											2019-04-18 00:52:08 +08:00
										 |  |  | 			result.Prefixes = append(result.Prefixes, objInfo.Name) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		result.Objects = append(result.Objects, objInfo) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if !eof { | 
					
						
							|  |  |  | 		result.IsTruncated = true | 
					
						
							|  |  |  | 		if len(objInfos) > 0 { | 
					
						
							|  |  |  | 			result.NextMarker = objInfos[len(objInfos)-1].Name | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Success.
 | 
					
						
							|  |  |  | 	return result, nil | 
					
						
							|  |  |  | } |