// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"encoding/gob"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"time"

	jsoniter "github.com/json-iterator/go"
	"github.com/minio/minio/internal/bucket/lifecycle"
	"github.com/minio/minio/internal/bucket/object/lock"
	"github.com/minio/minio/internal/bucket/versioning"
	"github.com/minio/minio/internal/color"
	"github.com/minio/minio/internal/hash"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/console"
)
type listPathOptions struct {
	// ID of the listing.
	// This will be used to persist the list.
	ID string

	// Bucket of the listing.
	Bucket string

	// Directory inside the bucket.
	// When unset listPath will set this based on Prefix.
	BaseDir string

	// Scan/return only content with prefix.
	Prefix string

	// FilterPrefix will return only results with this prefix when scanning.
	// Should never contain a slash.
	// Prefix should still be set.
	FilterPrefix string

	// Marker to resume listing.
	// The response will be the first entry >= this object name.
	Marker string

	// Limit the number of results.
	Limit int

	// The number of disks to ask.
	AskDisks string

	// InclDeleted will keep all entries where the latest version is a delete marker.
	InclDeleted bool

	// Scan recursively.
	// If false only the main directory will be scanned.
	// Should always be true if Separator is not SlashSeparator.
	Recursive bool

	// Separator to use.
	Separator string

	// Create indicates that the lister should not attempt to load an existing cache.
	Create bool

	// Include pure directories.
	IncludeDirectories bool

	// Transient is set if the cache is transient due to an error or being a reserved bucket.
	// This means the cache metadata will not be persisted on disk.
	// A transient result will never be returned from the cache, so knowing the list id is required.
	Transient bool

	// Versioned is true if this is a ListObjectVersions call.
	Versioned bool

	// Versioning config, used to check whether the path
	// has versioning enabled.
	Versioning *versioning.Versioning

	// Lifecycle performs filtering based on lifecycle.
	// This will filter out objects if the most recent version should be deleted by lifecycle.
	// Is not transferred across request calls.
	Lifecycle *lifecycle.Lifecycle

	// Retention configuration, needed to be passed along with lifecycle if set.
	Retention lock.Retention

	// Replication configuration.
	Replication replicationConfig

	// StopDiskAtLimit will stop listing on each disk when the limit number of objects has been returned.
	StopDiskAtLimit bool

	// pool and set of where the cache is located.
	pool, set int
}
func init() {
	gob.Register(listPathOptions{})
}

func (o *listPathOptions) setBucketMeta(ctx context.Context) {
	lc, _ := globalLifecycleSys.Get(o.Bucket)
	vc, _ := globalBucketVersioningSys.Get(o.Bucket)

	// Check if bucket is object locked.
	rcfg, _ := globalBucketObjectLockSys.Get(o.Bucket)
	replCfg, _, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, o.Bucket)
	tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, o.Bucket)
	o.Lifecycle = lc
	o.Versioning = vc
	o.Replication = replicationConfig{
		Config:  replCfg,
		remotes: tgts,
	}
	o.Retention = rcfg
}

// newMetacache constructs a new metacache from the options.
func (o listPathOptions) newMetacache() metacache {
	return metacache{
		id:          o.ID,
		bucket:      o.Bucket,
		root:        o.BaseDir,
		recursive:   o.Recursive,
		status:      scanStateStarted,
		error:       "",
		started:     UTCNow(),
		lastHandout: UTCNow(),
		lastUpdate:  UTCNow(),
		ended:       time.Time{},
		dataVersion: metacacheStreamVersion,
		filter:      o.FilterPrefix,
	}
}

func (o *listPathOptions) debugf(format string, data ...interface{}) {
	if serverDebugLog {
		console.Debugf(format+"\n", data...)
	}
}

func (o *listPathOptions) debugln(data ...interface{}) {
	if serverDebugLog {
		console.Debugln(data...)
	}
}
// gatherResults will collect all results on the input channel and filter results according to the options.
// Caller should close the channel when done.
// The returned function will return the results once there is enough or input is closed,
// or the context is canceled.
func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCacheEntry) func() (metaCacheEntriesSorted, error) {
	resultsDone := make(chan metaCacheEntriesSorted)
	// Copy so we can mutate
	resCh := resultsDone
	var done bool
	var mu sync.Mutex
	resErr := io.EOF

	go func() {
		var results metaCacheEntriesSorted
		var returned bool
		for entry := range in {
			if returned {
				// past limit
				continue
			}
			mu.Lock()
			returned = done
			mu.Unlock()
			if returned {
				resCh = nil
				continue
			}
			if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) {
				continue
			}
			if o.Marker != "" && entry.name < o.Marker {
				continue
			}
			if !strings.HasPrefix(entry.name, o.Prefix) {
				continue
			}
			if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
				continue
			}
			if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() {
				continue
			}
			if o.Limit > 0 && results.len() >= o.Limit {
				// We have enough and we have more.
				// Do not return io.EOF
				if resCh != nil {
					resErr = nil
					resCh <- results
					resCh = nil
					returned = true
				}
				continue
			}
			results.o = append(results.o, entry)
		}
		if resCh != nil {
			resErr = io.EOF
			select {
			case <-ctx.Done():
				// Nobody wants it.
			case resCh <- results:
			}
		}
	}()
	return func() (metaCacheEntriesSorted, error) {
		select {
		case <-ctx.Done():
			mu.Lock()
			done = true
			mu.Unlock()
			return metaCacheEntriesSorted{}, ctx.Err()
		case r := <-resultsDone:
			return r, resErr
		}
	}
}
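// Illustrative usage sketch (not part of the original source; channel size and
// flow are assumptions): a producer sends entries on a channel, closes it when
// done, and the caller collects the filtered, limited batch afterwards.
//
//	in := make(chan metaCacheEntry, 100)
//	getResults := o.gatherResults(ctx, in)
//	// ... producer sends entries on in, then closes it ...
//	entries, err := getResults() // err == io.EOF means no further results exist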
// findFirstPart will find the part with 0 being the first that corresponds to the marker in the options.
// io.ErrUnexpectedEOF is returned if the place containing the marker hasn't been scanned yet.
// io.EOF indicates the marker is beyond the end of the stream and does not exist.
func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) {
	search := o.Marker
	if search == "" {
		search = o.Prefix
	}
	if search == "" {
		return 0, nil
	}
	o.debugln("searching for ", search)
	var tmp metacacheBlock
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	i := 0
	for {
		partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, i)
		v, ok := fi.Metadata[partKey]
		if !ok {
			o.debugln("no match in metadata, waiting")
			return -1, io.ErrUnexpectedEOF
		}
		err := json.Unmarshal([]byte(v), &tmp)
		if err != nil {
			logger.LogIf(context.Background(), err)
			return -1, err
		}
		if tmp.First == "" && tmp.Last == "" && tmp.EOS {
			return 0, errFileNotFound
		}
		if tmp.First >= search {
			o.debugln("First >= search", v)
			return i, nil
		}
		if tmp.Last >= search {
			o.debugln("Last >= search", v)
			return i, nil
		}
		if tmp.EOS {
			o.debugln("no match, at EOS", v)
			return -3, io.EOF
		}
		o.debugln("First ", tmp.First, "<", search, " search", i)
		i++
	}
}

// updateMetacacheListing will update the metacache listing.
func (o *listPathOptions) updateMetacacheListing(m metacache, rpc *peerRESTClient) (metacache, error) {
	if rpc == nil {
		return localMetacacheMgr.updateCacheEntry(m)
	}
	return rpc.UpdateMetacacheListing(context.Background(), m)
}

func getMetacacheBlockInfo(fi FileInfo, block int) (*metacacheBlock, error) {
	var tmp metacacheBlock
	partKey := fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, block)
	v, ok := fi.Metadata[partKey]
	if !ok {
		return nil, io.ErrUnexpectedEOF
	}
	return &tmp, json.Unmarshal([]byte(v), &tmp)
}
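// Illustrative example (not part of the original source): assuming
// ReservedMetadataPrefixLower is "x-minio-internal-", block 2 of a listing is
// described by the metadata key "x-minio-internal-metacache-part-2", whose
// value is the JSON-encoded metacacheBlock for that part.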
const metacachePrefix = ".metacache"

func metacachePrefixForID(bucket, id string) string {
	return pathJoin(bucketMetaPrefix, bucket, metacachePrefix, id)
}

// objectPath returns the object path of the cache.
func (o *listPathOptions) objectPath(block int) string {
	return pathJoin(metacachePrefixForID(o.Bucket, o.ID), "block-"+strconv.Itoa(block)+".s2")
}
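// Illustrative example (not part of the original source): assuming
// bucketMetaPrefix is "buckets", a listing with ID "abc" on bucket "photos"
// stores its first block at
//
//	buckets/photos/.metacache/abc/block-0.s2
//
// inside minioMetaBucket.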
func (o *listPathOptions) SetFilter() {
	switch {
	case metacacheSharePrefix:
		return
	case o.Prefix == o.BaseDir:
		// No additional prefix
		return
	}
	// Remove basedir.
	o.FilterPrefix = strings.TrimPrefix(o.Prefix, o.BaseDir)
	// Remove leading and trailing slashes.
	o.FilterPrefix = strings.Trim(o.FilterPrefix, slashSeparator)

	if strings.Contains(o.FilterPrefix, slashSeparator) {
		// Sanity check, should not happen.
		o.FilterPrefix = ""
	}
}
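// Illustrative example (not part of the original source): with
// BaseDir "docs/2021/" and Prefix "docs/2021/rep", SetFilter leaves
// FilterPrefix as "rep". If the trimmed remainder still contained a slash
// (e.g. Prefix "docs/2021/q1/rep" against the same BaseDir), the sanity
// check would reset FilterPrefix to "".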
// filter will apply the options and return the number of objects requested by the limit.
// Will return io.EOF if there are no more entries with the same filter.
// The last entry can be used as a marker to resume the listing.
func (r *metacacheReader) filter(o listPathOptions) (entries metaCacheEntriesSorted, err error) {
	// Forward to prefix, if any
	err = r.forwardTo(o.Prefix)
	if err != nil {
		return entries, err
	}
	if o.Marker != "" {
		err = r.forwardTo(o.Marker)
		if err != nil {
			return entries, err
		}
	}
	o.debugln("forwarded to ", o.Prefix, "marker:", o.Marker, "sep:", o.Separator)

	// Filter
	if !o.Recursive {
		entries.o = make(metaCacheEntries, 0, o.Limit)
		pastPrefix := false
		err := r.readFn(func(entry metaCacheEntry) bool {
			if o.Prefix != "" && !strings.HasPrefix(entry.name, o.Prefix) {
				// We are past the prefix, don't continue.
				pastPrefix = true
				return false
			}
			if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) {
				return true
			}
			if !entry.isInDir(o.Prefix, o.Separator) {
				return true
			}
			if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() {
				return true
			}
			if entry.isAllFreeVersions() {
				return true
			}
			entries.o = append(entries.o, entry)
			return entries.len() < o.Limit
		})
		if (err != nil && errors.Is(err, io.EOF)) || pastPrefix || r.nextEOF() {
			return entries, io.EOF
		}
		return entries, err
	}

	// We should not need to filter more.
	return r.readN(o.Limit, o.InclDeleted, o.IncludeDirectories, o.Versioned, o.Prefix)
}
func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOptions) (entries metaCacheEntriesSorted, err error) {
	retries := 0
	rpc := globalNotificationSys.restClientFromHash(pathJoin(o.Bucket, o.Prefix))

	const (
		retryDelay    = 50 * time.Millisecond
		retryDelay250 = 250 * time.Millisecond
	)

	for {
		if contextCanceled(ctx) {
			return entries, ctx.Err()
		}

		// If many failures, check the cache state.
		if retries > 10 {
			err := o.checkMetacacheState(ctx, rpc)
			if err != nil {
				return entries, fmt.Errorf("remote listing canceled: %w", err)
			}
			retries = 1
		}

		// All operations are performed without locks, so we must be careful and allow for failures.
		// Read metadata associated with the object from a disk.
		if retries > 0 {
			for _, disk := range er.getDisks() {
				if disk == nil {
					continue
				}
				if !disk.IsOnline() {
					continue
				}
				_, err := disk.ReadVersion(ctx, minioMetaBucket,
					o.objectPath(0), "", false)
				if err != nil {
					time.Sleep(retryDelay250)
					retries++
					continue
				}
				break
			}
		}

		// Load first part metadata...
		// Read metadata associated with the object from all disks.
		fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(0), ObjectOptions{}, true)
		if err != nil {
			switch toObjectErr(err, minioMetaBucket, o.objectPath(0)).(type) {
			case ObjectNotFound:
				retries++
				if retries == 1 {
					time.Sleep(retryDelay)
				} else {
					time.Sleep(retryDelay250)
				}
				continue
			case InsufficientReadQuorum:
				retries++
				if retries == 1 {
					time.Sleep(retryDelay)
				} else {
					time.Sleep(retryDelay250)
				}
				continue
			default:
				return entries, fmt.Errorf("reading first part metadata: %w", err)
			}
		}

		partN, err := o.findFirstPart(fi)
		switch {
		case err == nil:
		case errors.Is(err, io.ErrUnexpectedEOF):
			if retries == 10 {
				err := o.checkMetacacheState(ctx, rpc)
				if err != nil {
					return entries, fmt.Errorf("remote listing canceled: %w", err)
				}
				retries = -1
			}
			retries++
			time.Sleep(retryDelay250)
			continue
		case errors.Is(err, io.EOF):
			return entries, io.EOF
		}

		// We got a stream to start at.
		loadedPart := 0
		for {
			if contextCanceled(ctx) {
				return entries, ctx.Err()
			}

			if partN != loadedPart {
				if retries > 10 {
					err := o.checkMetacacheState(ctx, rpc)
					if err != nil {
						return entries, fmt.Errorf("waiting for next part %d: %w", partN, err)
					}
					retries = 1
				}

				if retries > 0 {
					// Load from one disk only
					for _, disk := range er.getDisks() {
						if disk == nil {
							continue
						}
						if !disk.IsOnline() {
							continue
						}
						_, err := disk.ReadVersion(ctx, minioMetaBucket,
							o.objectPath(partN), "", false)
						if err != nil {
							time.Sleep(retryDelay250)
							retries++
							continue
						}
						break
					}
				}

				// Load partN metadata...
				fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, minioMetaBucket, o.objectPath(partN), ObjectOptions{}, true)
				if err != nil {
					time.Sleep(retryDelay250)
					retries++
					continue
				}
				loadedPart = partN
				bi, err := getMetacacheBlockInfo(fi, partN)
				logger.LogIf(ctx, err)
				if err == nil {
					if bi.pastPrefix(o.Prefix) {
						return entries, io.EOF
					}
				}
			}

			pr, pw := io.Pipe()
			go func() {
				werr := er.getObjectWithFileInfo(ctx, minioMetaBucket, o.objectPath(partN), 0,
					fi.Size, pw, fi, metaArr, onlineDisks)
				pw.CloseWithError(werr)
			}()

			tmp := newMetacacheReader(pr)
			e, err := tmp.filter(o)
			pr.CloseWithError(err)
			tmp.Close()
			entries.o = append(entries.o, e.o...)
			if o.Limit > 0 && entries.len() > o.Limit {
				entries.truncate(o.Limit)
				return entries, nil
			}
			if err == nil {
				// We stopped within the listing, we are done for now...
				return entries, nil
			}
			if err != nil && !errors.Is(err, io.EOF) {
				switch toObjectErr(err, minioMetaBucket, o.objectPath(partN)).(type) {
				case ObjectNotFound:
					retries++
					time.Sleep(retryDelay250)
					continue
				case InsufficientReadQuorum:
					retries++
					time.Sleep(retryDelay250)
					continue
				default:
					logger.LogIf(ctx, err)
					return entries, err
				}
			}

			// We finished at the end of the block.
			// And should not expect any more results.
			bi, err := getMetacacheBlockInfo(fi, partN)
			logger.LogIf(ctx, err)
			if err != nil || bi.EOS {
				// We are done and there are no more parts.
				return entries, io.EOF
			}
			if bi.endedPrefix(o.Prefix) {
				// Nothing more for prefix.
				return entries, io.EOF
			}
			partN++
			retries = 0
		}
	}
}
// getListQuorum interprets list quorum values and returns appropriate
// acceptable quorum expected for list operations
func getListQuorum(quorum string, driveCount int) int {
	switch quorum {
	case "disk":
		// smallest possible value, generally meant for testing.
		return 1
	case "reduced":
		return 2
	case "strict":
		return driveCount
	}
	// Defaults to (driveCount+1)/2 drives per set, defaults to "optimal" value
	if driveCount > 0 {
		return (driveCount + 1) / 2
	} // "3" otherwise.
	return 3
}
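// Illustrative values (not part of the original source): for a 16-drive
// erasure set,
//
//	getListQuorum("disk", 16)    == 1  // single drive, mostly for testing
//	getListQuorum("reduced", 16) == 2
//	getListQuorum("strict", 16)  == 16 // every drive must respond
//	getListQuorum("optimal", 16) == 8  // any other value defaults to (driveCount+1)/2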
// Will return io.EOF if continuing would not yield more results.
func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, results chan<- metaCacheEntry) (err error) {
	defer close(results)
	o.debugf(color.Green("listPath:")+" with options: %#v", o)

	// get non-healing disks for listing
	disks, _ := er.getOnlineDisksWithHealing()
	askDisks := getListQuorum(o.AskDisks, er.setDriveCount)
	var fallbackDisks []StorageAPI

	// Special case: ask all disks if the drive count is 4
	if er.setDriveCount == 4 || askDisks > len(disks) {
		askDisks = len(disks) // use all available drives
	}

	// However many we ask, versions must exist on ~50%
	listingQuorum := (askDisks + 1) / 2

	if askDisks > 0 && len(disks) > askDisks {
		rand.Shuffle(len(disks), func(i, j int) {
			disks[i], disks[j] = disks[j], disks[i]
		})
		fallbackDisks = disks[askDisks:]
		disks = disks[:askDisks]
	}

	// How to resolve results.
	resolver := metadataResolutionParams{
		dirQuorum: listingQuorum,
		objQuorum: listingQuorum,
		bucket:    o.Bucket,
	}

	// Maximum versions requested for "latest" object
	// resolution on versioned buckets, this is to be only
	// used when o.Versioned is false
	if !o.Versioned {
		resolver.requestedVersions = 1
	}
	var limit int
	if o.Limit > 0 && o.StopDiskAtLimit {
		// Over-read by 4 + 1 for every 16 in limit to give some space for resolver,
		// allow for truncating the list and know if we have more results.
		limit = o.Limit + 4 + (o.Limit / 16)
	}
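	// Worked example (illustrative, not part of the original source): with
	// o.Limit = 1000 each disk is asked for 1000 + 4 + 1000/16 = 1066 entries,
	// leaving headroom for entries dropped during resolution and for detecting
	// whether more results exist beyond the truncated list.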
										 |  |  | 	ctxDone := ctx.Done() | 
					
						
							|  |  |  | 	return listPathRaw(ctx, listPathRawOptions{ | 
					
						
							| 
									
										
										
										
											2021-07-25 13:03:38 +08:00
										 |  |  | 		disks:         disks, | 
					
						
							|  |  |  | 		fallbackDisks: fallbackDisks, | 
					
						
							|  |  |  | 		bucket:        o.Bucket, | 
					
						
							|  |  |  | 		path:          o.BaseDir, | 
					
						
							|  |  |  | 		recursive:     o.Recursive, | 
					
						
							|  |  |  | 		filterPrefix:  o.FilterPrefix, | 
					
						
							|  |  |  | 		minDisks:      listingQuorum, | 
					
						
							|  |  |  | 		forwardTo:     o.Marker, | 
					
						
							| 
									
										
										
										
											2022-09-09 23:13:06 +08:00
										 |  |  | 		perDiskLimit:  limit, | 
					
						
							| 
									
										
										
										
											2021-07-06 06:34:41 +08:00
										 |  |  | 		agreed: func(entry metaCacheEntry) { | 
					
						
							|  |  |  | 			select { | 
					
						
							|  |  |  | 			case <-ctxDone: | 
					
						
							|  |  |  | 			case results <- entry: | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2022-07-08 04:45:34 +08:00
										 |  |  | 		partial: func(entries metaCacheEntries, errs []error) { | 
					
						
							| 
									
										
										
										
											2021-07-06 06:34:41 +08:00
										 |  |  | 			// Results Disagree :-(
 | 
					
						
							|  |  |  | 			entry, ok := entries.resolve(&resolver) | 
					
						
							|  |  |  | 			if ok { | 
					
						
							|  |  |  | 				select { | 
					
						
							|  |  |  | 				case <-ctxDone: | 
					
						
							|  |  |  | 				case results <- *entry: | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	}) | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2020-10-29 00:18:35 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-07-06 06:34:41 +08:00
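// metaCacheRPC tracks the state of an ongoing listing: the options used, the
// metacache metadata being updated, and the peer client used to publish
// listing updates. mu must be held while reading or writing meta.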
type metaCacheRPC struct {
	o      listPathOptions
	mu     sync.Mutex
	meta   *metacache
	rpc    *peerRESTClient
	cancel context.CancelFunc
}

// setErr updates the listing metadata with the provided error string,
// unless an error has already been recorded.
func (m *metaCacheRPC) setErr(err string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	meta := *m.meta
	if meta.status != scanStateError {
		meta.error = err
		meta.status = scanStateError
	} else {
		// An error is already set.
		return
	}
	meta, _ = m.o.updateMetacacheListing(meta, m.rpc)
	*m.meta = meta
}

// saveMetaCacheStream reads listing entries from the provided channel and
// persists them as metacache blocks, while keeping the remote listing
// metadata updated.
func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCacheRPC, entries <-chan metaCacheEntry) (err error) {
	o := mc.o
	o.debugf(color.Green("saveMetaCacheStream:")+" with options: %#v", o)

	metaMu := &mc.mu
	rpc := mc.rpc
	cancel := mc.cancel
	defer func() {
		o.debugln(color.Green("saveMetaCacheStream:")+"err:", err)
		if err != nil && !errors.Is(err, io.EOF) {
			go mc.setErr(err.Error())
			cancel()
		}
	}()

	defer cancel()
	// Save continuous updates
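	// The goroutine below refreshes the listing state every 10 seconds and
	// cancels the listing if no client has asked for results within
	// metacacheMaxClientWait.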
	go func() {
		var err error
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		var exit bool
		for !exit {
			select {
			case <-ticker.C:
			case <-ctx.Done():
				exit = true
			}
			metaMu.Lock()
			meta := *mc.meta
			meta, err = o.updateMetacacheListing(meta, rpc)
			if err == nil && time.Since(meta.lastHandout) > metacacheMaxClientWait {
				cancel()
				exit = true
				meta.status = scanStateError
				meta.error = fmt.Sprintf("listing canceled since time since last handout was %v ago", time.Since(meta.lastHandout).Round(time.Second))
				o.debugln(color.Green("saveMetaCacheStream: ") + meta.error)
				meta, err = o.updateMetacacheListing(meta, rpc)
			}
			if err == nil {
				*mc.meta = meta
				if meta.status == scanStateError {
					cancel()
					exit = true
				}
			}
			metaMu.Unlock()
		}
	}()

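	// Retry parameters used below when updating block 0 metadata.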
	const retryDelay = 200 * time.Millisecond
	const maxTries = 5

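	// Each block of entries is stored as a separate object at o.objectPath(b.n);
	// for blocks after the first, block 0's metadata is refreshed with the
	// latest block header.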
	// Keep destination...
	// Write results to disk.
	bw := newMetacacheBlockWriter(entries, func(b *metacacheBlock) error {
		// If the block is 0 bytes and it's the first block, skip it.
		// Skip this only for Transient caches.
		if len(b.data) == 0 && b.n == 0 && o.Transient {
			return nil
		}
		o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n))
		r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)))
		logger.LogIf(ctx, err)
		custom := b.headerKV()
		_, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{
			UserDefined: custom,
		})
		if err != nil {
			mc.setErr(err.Error())
			cancel()
			return err
		}
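		// Block 0 already carries its header in the object metadata written
		// above, so only later blocks need to refresh it.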
		if b.n == 0 {
			return nil
		}
		// Update block 0 metadata.
		var retries int
		for {
			meta := b.headerKV()
			fi := FileInfo{
				Metadata: make(map[string]string, len(meta)),
			}
			for k, v := range meta {
				fi.Metadata[k] = v
			}
			err := er.updateObjectMeta(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks())
			if err == nil {
				break
			}
			switch err.(type) {
			case ObjectNotFound:
				return err
			case StorageErr:
				return err
			case InsufficientReadQuorum:
			default:
				logger.LogIf(ctx, err)
			}
			if retries >= maxTries {
				return err
			}
			retries++
			time.Sleep(retryDelay)
		}
		return nil
	})

	// Blocks until all entries have been consumed or an error occurs.
	err = bw.Close()
	if err != nil {
		mc.setErr(err.Error())
	}
	metaMu.Lock()
	defer metaMu.Unlock()
	if mc.meta.error != "" {
		return err
	}
	// Save success
	mc.meta.status = scanStateSuccess
	meta, err := o.updateMetacacheListing(*mc.meta, rpc)
	if err == nil {
		*mc.meta = meta
	}
	return nil
}

// listPathRawOptions contains the inputs and result callbacks for listPathRaw.
type listPathRawOptions struct {
	disks         []StorageAPI
	fallbackDisks []StorageAPI
	bucket, path  string
	recursive     bool

	// Only return results with this prefix.
	filterPrefix string

	// Forward to this prefix before returning results.
	forwardTo string

	// Minimum number of good disks to continue.
	// An error will be returned if fewer than this many disks return results without an error.
	minDisks       int
	reportNotFound bool

	// perDiskLimit will limit each disk to return n objects.
	// If <= 0 all results will be returned until canceled.
	perDiskLimit int

	// Callbacks with results:
	// Any callback set to nil will not be called.

	// agreed is called if all disks agreed.
	agreed func(entry metaCacheEntry)

	// partial will be called when there is disagreement between disks.
	// If a disk did not return any result, but also hasn't errored,
	// its entry will be empty and its error entry will be nil.
	partial func(entries metaCacheEntries, errs []error)

	// finished will be called when all streams have finished and
	// one or more disks returned an error.
	// Will not be called if everything operates as expected.
	finished func(errs []error)
}

// listPathRaw will list a path on the provided drives.
// See listPathRawOptions on how results are delivered.
// Directories are always returned.
// Cache will be bypassed.
// Context cancellation will be respected but may take a while to take effect.
func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
	disks := opts.disks
	if len(disks) == 0 {
		return fmt.Errorf("listPathRaw: 0 drives provided")
	}

	// Cancel upstream if we finish before we expect.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

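	// When a drive's walk fails with a storage error, the fallback closure
	// below hands out the next online fallback disk to retry on.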
	// Keep track of fallback disks
	var fdMu sync.Mutex
	fds := opts.fallbackDisks
	fallback := func(err error) StorageAPI {
		if _, ok := err.(StorageErr); ok {
			// Attempt to grab a fallback disk
			fdMu.Lock()
			defer fdMu.Unlock()
			if len(fds) == 0 {
				return nil
			}
			fdsCopy := fds
			for _, fd := range fdsCopy {
				// Grab a fallback disk
				fds = fds[1:]
				if fd != nil && fd.IsOnline() {
					return fd
				}
			}
		}
		// Either no more disks for fallback or
		// not a storage error.
		return nil
	}
	askDisks := len(disks)
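	// One reader per requested disk; each reader is fed by the WalkDir
	// goroutine started for that disk below.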
	readers := make([]*metacacheReader, askDisks)
	defer func() {
		for _, r := range readers {
			r.Close()
		}
	}()
	for i := range disks {
		r, w := io.Pipe()
		// Make sure we close the pipe so blocked writes don't stay around.
		defer r.CloseWithError(context.Canceled)

		readers[i] = newMetacacheReader(r)
		d := disks[i]

		// Send request to each disk.
		go func() {
			var werr error
			if d == nil {
				werr = errDiskNotFound
			} else {
				werr = d.WalkDir(ctx, WalkDirOptions{
					Limit:          opts.perDiskLimit,
					Bucket:         opts.bucket,
					BaseDir:        opts.path,
					Recursive:      opts.recursive,
					ReportNotFound: opts.reportNotFound,
					FilterPrefix:   opts.filterPrefix,
					ForwardTo:      opts.forwardTo,
				}, w)
			}

			// Fall back only when fallback disks are set.
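			// Each failed walk is retried on the next fallback disk until one
			// succeeds or no fallback disks remain.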
			for {
				fd := fallback(werr)
				if fd == nil {
					break
				}
				// This fallback is only set when
				// askDisks is less than total
				// number of disks per set.
				werr = fd.WalkDir(ctx, WalkDirOptions{
					Limit:          opts.perDiskLimit,
					Bucket:         opts.bucket,
					BaseDir:        opts.path,
					Recursive:      opts.recursive,
					ReportNotFound: opts.reportNotFound,
					FilterPrefix:   opts.filterPrefix,
					ForwardTo:      opts.forwardTo,
				}, w)
				if werr == nil {
					break
				}
			}
			w.CloseWithError(werr)
		}()
	}

	// Merge the per-disk streams entry by entry: peek the top entry from each
	// reader, keep the lexically smallest name as the current entry, and hand
	// it to agreed (all disks match) or partial (disks disagree).
	topEntries := make(metaCacheEntries, len(readers))
	errs := make([]error, len(readers))
	for {
		// Get the top entry from each reader.
		var current metaCacheEntry
		var atEOF, fnf, hasErr, agree int
		for i := range topEntries {
			topEntries[i] = metaCacheEntry{}
		}
		if contextCanceled(ctx) {
			return ctx.Err()
		}
		for i, r := range readers {
			if errs[i] != nil {
				hasErr++
				continue
			}
			entry, err := r.peek()
			switch err {
			case io.EOF:
				atEOF++
				continue
			case nil:
			default:
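				// Treat not-found style errors as an empty result from this
				// disk rather than a failure.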
				switch err.Error() {
				case errFileNotFound.Error(),
					errVolumeNotFound.Error(),
					errUnformattedDisk.Error(),
					errDiskNotFound.Error():
					atEOF++
					fnf++
					continue
				}
				hasErr++
				errs[i] = err
				continue
			}
			// If no current, add it.
			if current.name == "" {
				topEntries[i] = entry
				current = entry
				agree++
				continue
			}
			// If exact match, we agree.
			if _, ok := current.matches(&entry, true); ok {
				topEntries[i] = entry
				agree++
				continue
			}
			// If only the name matches we didn't agree, but add it for resolution.
			if entry.name == current.name {
				topEntries[i] = entry
				continue
			}
			// We got different entries
			if entry.name > current.name {
				continue
			}
			// We got a new, better current.
			// Clear existing entries.
			for i := range topEntries[:i] {
				topEntries[i] = metaCacheEntry{}
			}
			agree = 1
			current = entry
			topEntries[i] = entry
		}

		// Stop if the number of failed disks exceeds what minDisks allows.
		if hasErr > len(disks)-opts.minDisks && hasErr > 0 {
			if opts.finished != nil {
				opts.finished(errs)
			}
			var combinedErr []string
			for i, err := range errs {
				if err != nil {
					if disks[i] != nil {
						combinedErr = append(combinedErr,
							fmt.Sprintf("drive %s returned: %s", disks[i], err))
					} else {
						combinedErr = append(combinedErr, err.Error())
					}
				}
			}
			return errors.New(strings.Join(combinedErr, ", "))
		}

		// Break if all at EOF or error.
		if atEOF+hasErr == len(readers) {
			if hasErr > 0 && opts.finished != nil {
				opts.finished(errs)
			}
			break
		}
		if fnf == len(readers) {
			return errFileNotFound
		}
		if agree == len(readers) {
			// Everybody agreed
			for _, r := range readers {
				r.skip(1)
			}
			if opts.agreed != nil {
				opts.agreed(current)
			}
			continue
		}
		if opts.partial != nil {
			opts.partial(topEntries, errs)
		}
		// Skip the inputs we used.
		for i, r := range readers {
			if topEntries[i].name != "" {
				r.skip(1)
			}
		}
	}
	return nil
}