// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/google/uuid"
	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/sync/errgroup"
	"github.com/minio/pkg/wildcard"
)

type erasureServerPools struct {
	poolMetaMutex sync.RWMutex
	poolMeta      poolMeta

	rebalMu   sync.RWMutex
	rebalMeta *rebalanceMeta

	deploymentID     [16]byte
	distributionAlgo string

	serverPools []*erasureSets

	// Active decommission cancelers
	decommissionCancelers []context.CancelFunc

	s3Peer *S3PeerSys
}

func (z *erasureServerPools) SinglePool() bool {
	return len(z.serverPools) == 1
}

// Initialize new pool of erasure sets.
func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
	var (
		deploymentID       string
		distributionAlgo   string
		commonParityDrives int
		err                error

		formats      = make([]*formatErasureV3, len(endpointServerPools))
		storageDisks = make([][]StorageAPI, len(endpointServerPools))
		z            = &erasureServerPools{
			serverPools: make([]*erasureSets, len(endpointServerPools)),
			s3Peer:      NewS3PeerSys(endpointServerPools),
		}
	)

	var localDrives []StorageAPI
	local := endpointServerPools.FirstLocal()
	for i, ep := range endpointServerPools {
		// If storage class is not set during startup, default parity values are used:
		// -- Reduced Redundancy storage class defaults to parity = 2
		// -- Standard storage class defaults to parity = 2 for 4 or 5 drives
		// -- Standard storage class defaults to parity = 3 for 6 or 7 drives
		// -- Standard storage class defaults to parity = 4 for 8 to 16 drives
		if commonParityDrives == 0 {
			commonParityDrives, err = ecDrivesNoConfig(ep.DrivesPerSet)
			if err != nil {
				return nil, err
			}
		}

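		// For example, under the defaults listed above, a pool started with 12
		// drives per set and no explicit storage-class configuration resolves to
		// commonParityDrives = 4, i.e. 8 data + 4 parity shards per erasure set.
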
		if err = storageclass.ValidateParity(commonParityDrives, ep.DrivesPerSet); err != nil {
			return nil, fmt.Errorf("parity validation returned an error: %w <- (%d, %d), for pool(%s)", err, commonParityDrives, ep.DrivesPerSet, humanize.Ordinal(i+1))
		}

		storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
			ep.SetCount, ep.DrivesPerSet, deploymentID, distributionAlgo)
		if err != nil {
			return nil, err
		}

		for _, storageDisk := range storageDisks[i] {
			if storageDisk != nil && storageDisk.IsLocal() {
				localDrives = append(localDrives, storageDisk)
			}
		}

		if deploymentID == "" {
			// all pools should have the same deployment ID
			deploymentID = formats[i].ID
		}

		if distributionAlgo == "" {
			distributionAlgo = formats[i].Erasure.DistributionAlgo
		}

		// Validate that users did not bring pools with different deployment IDs.
		if deploymentID != formats[i].ID {
			return nil, fmt.Errorf("all pools must have same deployment ID - expected %s, got %s for pool(%s)", deploymentID, formats[i].ID, humanize.Ordinal(i+1))
		}

		z.serverPools[i], err = newErasureSets(ctx, ep, storageDisks[i], formats[i], commonParityDrives, i)
		if err != nil {
			return nil, err
		}

		if deploymentID != "" && bytes.Equal(z.deploymentID[:], []byte{}) {
			z.deploymentID = uuid.MustParse(deploymentID)
		}

		if distributionAlgo != "" && z.distributionAlgo == "" {
			z.distributionAlgo = distributionAlgo
		}
	}

	z.decommissionCancelers = make([]context.CancelFunc, len(z.serverPools))

	// initialize the object layer.
	setObjectLayer(z)

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		err := z.Init(ctx) // Initializes all pools.
		if err != nil {
			if !configRetriableErrors(err) {
				logger.Fatal(err, "Unable to initialize backend")
			}
			retry := time.Duration(r.Float64() * float64(5*time.Second))
			logger.LogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry))
			time.Sleep(retry)
			continue
		}
		break
	}

	globalLocalDrivesMu.Lock()
	globalLocalDrives = localDrives
	defer globalLocalDrivesMu.Unlock()

	return z, nil
}

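// NewNSLock returns a namespace lock scoped to the pool selected by hashing the
// first object name (or the empty string when no objects are given), so lock
// requests for the same object always land on the same pool's locker.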
func (z *erasureServerPools) NewNSLock(bucket string, objects ...string) RWLocker {
	poolID := hashKey(z.distributionAlgo, "", len(z.serverPools), z.deploymentID)
	if len(objects) >= 1 {
		poolID = hashKey(z.distributionAlgo, objects[0], len(z.serverPools), z.deploymentID)
	}
	return z.serverPools[poolID].NewNSLock(bucket, objects...)
}

// GetDisksID will return disks by their ID.
func (z *erasureServerPools) GetDisksID(ids ...string) []StorageAPI {
	idMap := make(map[string]struct{})
	for _, id := range ids {
		idMap[id] = struct{}{}
	}
	res := make([]StorageAPI, 0, len(idMap))
	for _, s := range z.serverPools {
		for _, set := range s.sets {
			for _, disk := range set.getDisks() {
				if disk == OfflineDisk {
					continue
				}
				if id, _ := disk.GetDiskID(); id != "" {
					if _, ok := idMap[id]; ok {
						res = append(res, disk)
					}
				}
			}
		}
	}
	return res
}

// GetRawData will return all files with a given raw path to the callback.
// Errors are ignored, only errors from the callback are returned.
// For now only direct file paths are supported.
func (z *erasureServerPools) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error {
	found := 0
	for _, s := range z.serverPools {
		for _, set := range s.sets {
			for _, disk := range set.getDisks() {
				if disk == OfflineDisk {
					continue
				}
				stats, err := disk.StatInfoFile(ctx, volume, file, true)
				if err != nil {
					continue
				}
				for _, si := range stats {
					found++
					var r io.ReadCloser
					if !si.Dir {
						r, err = disk.ReadFileStream(ctx, volume, si.Name, 0, si.Size)
						if err != nil {
							continue
						}
					} else {
						r = io.NopCloser(bytes.NewBuffer([]byte{}))
					}
					// Keep disk path instead of ID, to ensure that the downloaded zip file can be
					// easily automated with `minio server hostname{1...n}/disk{1...m}`.
					err = fn(r, disk.Hostname(), disk.Endpoint().Path, pathJoin(volume, si.Name), si)
					r.Close()
					if err != nil {
						return err
					}
				}
			}
		}
	}
	if found == 0 {
		return errFileNotFound
	}
	return nil
}

// SetDriveCounts returns the number of drives per erasure set for each pool.
func (z *erasureServerPools) SetDriveCounts() []int {
	setDriveCounts := make([]int, len(z.serverPools))
	for i := range z.serverPools {
		setDriveCounts[i] = z.serverPools[i].SetDriveCount()
	}
	return setDriveCounts
}

type serverPoolsAvailableSpace []poolAvailableSpace

type poolAvailableSpace struct {
	Index      int
	Available  uint64
	MaxUsedPct int // Used disk percentage of most filled disk, rounded down.
}

// TotalAvailable - total available space
func (p serverPoolsAvailableSpace) TotalAvailable() uint64 {
	total := uint64(0)
	for _, z := range p {
		total += z.Available
	}
	return total
}

// FilterMaxUsed will filter out any pools that have a used percentage at or above max,
// unless all of them do, in which case all are preserved.
func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) {
	// We aren't modifying p, only entries in it, so we don't need to receive a pointer.
	if len(p) <= 1 {
		// Nothing to do.
		return
	}
	var ok bool
	for _, z := range p {
		if z.MaxUsedPct < max {
			ok = true
			break
		}
	}
	if !ok {
		// All are above the limit.
		// Do not modify.
		return
	}

	// Remove entries that are above the limit.
	for i, z := range p {
		if z.MaxUsedPct < max {
			continue
		}
		p[i].Available = 0
	}
}

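// A minimal usage sketch for the two helpers above (illustrative values only,
// not taken from this package's call sites):
//
//	pools := serverPoolsAvailableSpace{
//		{Index: 0, Available: 100 << 30, MaxUsedPct: 95},
//		{Index: 1, Available: 200 << 30, MaxUsedPct: 60},
//	}
//	pools.FilterMaxUsed(90)    // pool 0 is at/above the limit, its Available is zeroed
//	_ = pools.TotalAvailable() // 200 GiB, only pool 1 remains eligible
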
// getAvailablePoolIdx will return an index that can hold size bytes.
// -1 is returned if no serverPools have available space for the size given.
func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, object string, size int64) int {
	serverPools := z.getServerPoolsAvailableSpace(ctx, bucket, object, size)
	serverPools.FilterMaxUsed(100 - (100 * diskReserveFraction))
	total := serverPools.TotalAvailable()
	if total == 0 {
		return -1
	}
	// choose when we reach this many
	choose := rand.Uint64() % total
	atTotal := uint64(0)
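	// Weighted random selection: walking the cumulative available space until it
	// passes `choose` picks each pool with probability proportional to its share
	// of the total available space.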
	for _, pool := range serverPools {
		atTotal += pool.Available
		if atTotal > choose && pool.Available > 0 {
			return pool.Index
		}
	}
	// Should not happen, but print values just in case.
	logger.LogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose))
	return -1
}

// getServerPoolsAvailableSpace will return the available space of each pool after storing the content.
// If there is not enough space the pool will return 0 bytes available.
// The available space of each pool is multiplied by its number of sets.
// Negative sizes are seen as 0 bytes.
func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, bucket, object string, size int64) serverPoolsAvailableSpace {
	serverPools := make(serverPoolsAvailableSpace, len(z.serverPools))

	storageInfos := make([][]*DiskInfo, len(z.serverPools))
	nSets := make([]int, len(z.serverPools))
	g := errgroup.WithNErrs(len(z.serverPools))
	for index := range z.serverPools {
		index := index
		// Skip suspended pools or pools participating in rebalance for any new
		// I/O.
		if z.IsSuspended(index) || z.IsPoolRebalancing(index) {
			continue
		}
		pool := z.serverPools[index]
		nSets[index] = pool.setCount
		g.Go(func() error {
			// Get the set where it would be placed.
			storageInfos[index] = getDiskInfos(ctx, pool.getHashedSet(object).getDisks()...)
			return nil
		}, index)
	}

	// Wait for the goroutines.
	g.Wait()

	for i, zinfo := range storageInfos {
		if zinfo == nil {
			serverPools[i] = poolAvailableSpace{Index: i}
			continue
		}
		var available uint64
		if !isMinioMetaBucketName(bucket) {
			if avail, err := hasSpaceFor(zinfo, size); err != nil && !avail {
				serverPools[i] = poolAvailableSpace{Index: i}
				continue
			}
		}
		var maxUsedPct int
		for _, disk := range zinfo {
			if disk == nil || disk.Total == 0 {
				continue
			}
			available += disk.Total - disk.Used

			// Track maxUsedPct as the used percentage of the most-filled disk.
			if pctUsed := int(disk.Used * 100 / disk.Total); pctUsed > maxUsedPct {
				maxUsedPct = pctUsed
			}
		}

		// Since we are comparing pools that may have a different number of sets
		// we multiply by the number of sets in the pool.
		// This compensates for differences in set sizes
		// when choosing the destination pool.
		// Different set sizes are already compensated by fewer disks.
		available *= uint64(nSets[i])
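
		// For instance (illustrative numbers): if the sampled set reports 500 GiB
		// free and the pool has 8 sets, the pool is weighted here as roughly 4 TiB
		// of available space when pools are compared against each other.
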
		serverPools[i] = poolAvailableSpace{
			Index:      i,
			Available:  available,
			MaxUsedPct: maxUsedPct,
		}
	}
	return serverPools
}

// PoolObjInfo represents the state of the current object version per pool
type PoolObjInfo struct {
	Index   int
	ObjInfo ObjectInfo
	Err     error
}

func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (PoolObjInfo, error) {
	poolObjInfos := make([]PoolObjInfo, len(z.serverPools))
	poolOpts := make([]ObjectOptions, len(z.serverPools))
	for i := range z.serverPools {
		poolOpts[i] = opts
	}

	var wg sync.WaitGroup
	for i, pool := range z.serverPools {
		wg.Add(1)
		go func(i int, pool *erasureSets, opts ObjectOptions) {
			defer wg.Done()
			// Remember the pool index; the slice may be sorted later and the original index would be lost.
			pinfo := PoolObjInfo{
				Index: i,
			}
			// Do not remove this check; removing it can lead to inconsistencies
			// for all callers of bucket replication.
			if !opts.MetadataChg {
				opts.VersionID = ""
			}
			pinfo.ObjInfo, pinfo.Err = pool.GetObjectInfo(ctx, bucket, object, opts)
			poolObjInfos[i] = pinfo
		}(i, pool, poolOpts[i])
	}
	wg.Wait()

	// Sort the objInfos such that we always serve the latest object.
	// This is a defensive change to handle any duplicate
	// content that may have been created; we always serve
	// the latest version.
	sort.Slice(poolObjInfos, func(i, j int) bool {
		mtime1 := poolObjInfos[i].ObjInfo.ModTime
		mtime2 := poolObjInfos[j].ObjInfo.ModTime
		return mtime1.After(mtime2)
	})

	defPool := PoolObjInfo{Index: -1}
	for _, pinfo := range poolObjInfos {
		// Skip all objects from suspended pools if asked by the
		// caller.
		if opts.SkipDecommissioned && z.IsSuspended(pinfo.Index) {
			continue
		}
		// Skip object if it's from pools participating in a rebalance operation.
		if opts.SkipRebalancing && z.IsPoolRebalancing(pinfo.Index) {
			continue
		}
		if pinfo.Err == nil {
			// found a pool
			return pinfo, nil
		}
		if isErrReadQuorum(pinfo.Err) && !opts.MetadataChg {
			// A read quorum error is returned when the object is visibly
			// present but it is unreadable; we simply ask the writes to be
			// scheduled to this pool instead. If there is no quorum
			// it will fail anyway, however if there is quorum available
			// with enough disks online but sufficiently inconsistent to
			// break the parity threshold, allow them to be overwritten
			// or allow new versions to be added.
			return pinfo, nil
		}
		defPool = pinfo
		if !isErrObjectNotFound(pinfo.Err) {
			return pinfo, pinfo.Err
		}

		// No object exists or it's a delete marker;
		// check objInfo to confirm.
		if pinfo.ObjInfo.DeleteMarker && pinfo.ObjInfo.Name != "" {
			return pinfo, nil
		}
	}
	if opts.ReplicationRequest && opts.DeleteMarker && defPool.Index >= 0 {
		// If the request is a delete marker replication request, return a default pool
		// in cases where the object does not exist.
		// This is to ensure that the delete marker is replicated to the destination.
		return defPool, nil
	}
	return PoolObjInfo{}, toObjectErr(errFileNotFound, bucket, object)
}

func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (idx int, err error) {
	pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return -1, err
	}
	return pinfo.Index, nil
}

// getPoolIdxExistingNoLock returns the (first) found object pool index containing an object.
// If the object exists, but the latest version is a delete marker, the index with it is still returned.
// If the object does not exist ObjectNotFound error is returned.
// If any other error is found, it is returned.
// The check is skipped if there is only one pool, and 0, nil is always returned in that case.
func (z *erasureServerPools) getPoolIdxExistingNoLock(ctx context.Context, bucket, object string) (idx int, err error) {
	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
		NoLock:             true,
		SkipDecommissioned: true,
		SkipRebalancing:    true,
	})
}

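// getPoolIdxNoLock returns the pool index of an existing object without taking a
// lock; when no pool has the object it picks a pool based on available space,
// and returns errDiskFull when no pool can hold size bytes.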
func (z *erasureServerPools) getPoolIdxNoLock(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
	idx, err = z.getPoolIdxExistingNoLock(ctx, bucket, object)
	if err != nil && !isErrObjectNotFound(err) {
		return idx, err
	}

	if isErrObjectNotFound(err) {
		idx = z.getAvailablePoolIdx(ctx, bucket, object, size)
		if idx < 0 {
			return -1, toObjectErr(errDiskFull)
		}
	}

	return idx, nil
}

// getPoolIdx returns the pool index of the found previous object; if none is found
// it falls back to the pool with the most available space. This function is designed
// to be used only by PutObject, CopyObject (new object creation) and NewMultipartUpload.
func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
	idx, err = z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
		SkipDecommissioned: true,
		SkipRebalancing:    true,
	})
	if err != nil && !isErrObjectNotFound(err) {
		return idx, err
	}

	if isErrObjectNotFound(err) {
		idx = z.getAvailablePoolIdx(ctx, bucket, object, size)
		if idx < 0 {
			return -1, toObjectErr(errDiskFull)
		}
	}

	return idx, nil
}

										 |  |  | func (z *erasureServerPools) Shutdown(ctx context.Context) error { | 
					
						
							|  |  |  | 	g := errgroup.WithNErrs(len(z.serverPools)) | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	for index := range z.serverPools { | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 			return z.serverPools[index].Shutdown(ctx) | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 		}, index) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for _, err := range g.Wait() { | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// let's the rest shutdown
 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
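// The errgroup fan-out above is the recurring pattern in this file: one
// goroutine per pool, results indexed by pool, then a single Wait. A minimal
// sketch of the same shape (pools is a placeholder slice):
//
//	g := errgroup.WithNErrs(len(pools))
//	for i := range pools {
//		i := i
//		g.Go(func() error { return pools[i].Shutdown(ctx) }, i)
//	}
//	errs := g.Wait()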
					
						
func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
	b.Type = madmin.Erasure

	scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
	if scParity < 0 {
		scParity = z.serverPools[0].defaultParityCount
	}
	rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)

	// Data blocks can vary per pool, but parity is the same.
	for i, setDriveCount := range z.SetDriveCounts() {
		b.StandardSCData = append(b.StandardSCData, setDriveCount-scParity)
		b.RRSCData = append(b.RRSCData, setDriveCount-rrSCParity)
		b.DrivesPerSet = append(b.DrivesPerSet, setDriveCount)
		b.TotalSets = append(b.TotalSets, z.serverPools[i].setCount)
	}

	b.StandardSCParity = scParity
	b.RRSCParity = rrSCParity
	return
}
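// Worked example for the data/parity split above (hypothetical layout): with a
// set drive count of 16 and STANDARD parity of 4, StandardSCData records 12
// data drives per set; with RRS parity of 2, RRSCData records 14. Parity is
// global across pools, while the data drive count can differ per pool.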
					
						
func (z *erasureServerPools) LocalStorageInfo(ctx context.Context) StorageInfo {
	var storageInfo StorageInfo

	storageInfos := make([]StorageInfo, len(z.serverPools))
	g := errgroup.WithNErrs(len(z.serverPools))
	for index := range z.serverPools {
		index := index
		g.Go(func() error {
			storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx)
			return nil
		}, index)
	}

	// Wait for the goroutines.
	g.Wait()

	storageInfo.Backend = z.BackendInfo()
	for _, lstorageInfo := range storageInfos {
		storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
	}

	return storageInfo
}
					
						
func (z *erasureServerPools) StorageInfo(ctx context.Context) StorageInfo {
	return globalNotificationSys.StorageInfo(z)
}
					
						
func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error {
	// Updates must be closed before we return.
	defer close(updates)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	var mu sync.Mutex
	var results []dataUsageCache
	var firstErr error

	allBuckets, err := z.ListBuckets(ctx, BucketOptions{})
	if err != nil {
		return err
	}

	if len(allBuckets) == 0 {
		updates <- DataUsageInfo{} // no buckets found; update data usage to reflect the latest state
		return nil
	}

	// Scan the most recently created buckets first.
	sort.Slice(allBuckets, func(i, j int) bool {
		return allBuckets[i].Created.After(allBuckets[j].Created)
	})

	// Collect for each set in serverPools.
	for _, z := range z.serverPools {
		for _, erObj := range z.sets {
			wg.Add(1)
			results = append(results, dataUsageCache{})
			go func(i int, erObj *erasureObjects) {
				updates := make(chan dataUsageCache, 1)
				defer close(updates)
				// Start update collector.
				go func() {
					defer wg.Done()
					for info := range updates {
						mu.Lock()
						results[i] = info
						mu.Unlock()
					}
				}()
				// Start scanner. Blocks until done.
				err := erObj.nsScanner(ctx, allBuckets, wantCycle, updates, healScanMode)
				if err != nil {
					logger.LogIf(ctx, err)
					mu.Lock()
					if firstErr == nil {
						firstErr = err
					}
					// Cancel remaining...
					cancel()
					mu.Unlock()
					return
				}
			}(len(results)-1, erObj)
		}
	}
	updateCloser := make(chan chan struct{})
	go func() {
		updateTicker := time.NewTicker(30 * time.Second)
		defer updateTicker.Stop()
		var lastUpdate time.Time

		// We need to merge since we will get the same buckets from each pool.
		// Therefore, to get the exact bucket sizes we must merge before we can convert.
		var allMerged dataUsageCache

		update := func() {
			mu.Lock()
			defer mu.Unlock()

			allMerged = dataUsageCache{Info: dataUsageCacheInfo{Name: dataUsageRoot}}
			for _, info := range results {
				if info.Info.LastUpdate.IsZero() {
					// Not filled yet.
					return
				}
				allMerged.merge(info)
			}
			if allMerged.root() != nil && allMerged.Info.LastUpdate.After(lastUpdate) {
				updates <- allMerged.dui(allMerged.Info.Name, allBuckets)
				lastUpdate = allMerged.Info.LastUpdate
			}
		}
		for {
			select {
			case <-ctx.Done():
				return
			case v := <-updateCloser:
				update()
				close(v)
				return
			case <-updateTicker.C:
				update()
			}
		}
	}()

	wg.Wait()
	ch := make(chan struct{})
	select {
	case updateCloser <- ch:
		<-ch
	case <-ctx.Done():
		mu.Lock()
		if firstErr == nil {
			firstErr = ctx.Err()
		}
		defer mu.Unlock()
	}
	return firstErr
}
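// The periodic update above reduces to this shape (a simplified sketch using
// the same variables as NSScanner; every erasure set must have reported at
// least once before an aggregate is published):
//
//	merged := dataUsageCache{Info: dataUsageCacheInfo{Name: dataUsageRoot}}
//	for _, r := range results {
//		if r.Info.LastUpdate.IsZero() {
//			return // some set has not reported yet
//		}
//		merged.merge(r)
//	}
//	updates <- merged.dui(merged.Info.Name, allBuckets)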
					
						
// MakeBucket - creates a new bucket across all serverPools simultaneously.
// Even if one of the sets fails to create a bucket, we proceed with all the
// successful operations.
func (z *erasureServerPools) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error {
	// Verify if bucket is valid.
	if !isMinioMetaBucketName(bucket) {
		if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
			return BucketNameInvalid{Bucket: bucket}
		}

		if !opts.NoLock {
			// Lock the bucket name before creating.
			lk := z.NewNSLock(minioMetaTmpBucket, bucket+".lck")
			lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
			if err != nil {
				return err
			}

			ctx = lkctx.Context()
			defer lk.Unlock(lkctx)
		}
	}

	if err := z.s3Peer.MakeBucket(ctx, bucket, opts); err != nil {
		if _, ok := err.(BucketExists); !ok {
			// Delete created buckets, ignoring errors.
			z.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{
				NoLock:     true,
				NoRecreate: true,
			})
		}
		return err
	}

	// If it doesn't exist we get a new one, so ignore errors.
	meta := newBucketMetadata(bucket)
	meta.SetCreatedAt(opts.CreatedAt)
	if opts.LockEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
		meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
	}

	if opts.VersioningEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
	}

	if err := meta.Save(context.Background(), z); err != nil {
		return toObjectErr(err, bucket)
	}

	globalBucketMetadataSys.Set(bucket, meta)

	// Success.
	return nil
}
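// Minimal caller sketch (assumes a handler that has already validated the
// request; "mybucket" is a placeholder): enabling object lock also turns on
// versioning, as handled above.
//
//	err := z.MakeBucket(ctx, "mybucket", MakeBucketOptions{
//		LockEnabled: true,
//	})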
					
						
func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return nil, err
	}

	object = encodeDirObject(object)

	if z.SinglePool() {
		return z.serverPools[0].GetObjectNInfo(ctx, bucket, object, rs, h, opts)
	}

	var unlockOnDefer bool
	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
		}
	}()

	// Acquire lock
	if !opts.NoLock {
		lock := z.NewNSLock(bucket, object)
		lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
		if err != nil {
			return nil, err
		}
		ctx = lkctx.Context()
		nsUnlocker = func() { lock.RUnlock(lkctx) }
		unlockOnDefer = true
	}

	checkPrecondFn := opts.CheckPrecondFn
	opts.CheckPrecondFn = nil // do not need to apply pre-conditions at lower layer.
	opts.NoLock = true        // no locks needed at lower levels for getObjectInfo()
	objInfo, zIdx, err := z.getLatestObjectInfoWithIdx(ctx, bucket, object, opts)
	if err != nil {
		if objInfo.DeleteMarker {
			if opts.VersionID == "" {
				return &GetObjectReader{
					ObjInfo: objInfo,
				}, toObjectErr(errFileNotFound, bucket, object)
			}
			// Make sure to return object info to provide extra information.
			return &GetObjectReader{
				ObjInfo: objInfo,
			}, toObjectErr(errMethodNotAllowed, bucket, object)
		}
		return nil, err
	}

	// Check preconditions before reading the stream.
	if checkPrecondFn != nil && checkPrecondFn(objInfo) {
		return nil, PreConditionFailed{}
	}

	opts.NoLock = true
	gr, err = z.serverPools[zIdx].GetObjectNInfo(ctx, bucket, object, rs, h, opts)
	if err != nil {
		return nil, err
	}

	if unlockOnDefer {
		unlockOnDefer = gr.ObjInfo.Inlined
	}

	if !unlockOnDefer {
		return gr.WithCleanupFuncs(nsUnlocker), nil
	}
	return gr, nil
}
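// Caller sketch (a minimal example; w is a placeholder io.Writer and error
// handling is elided): for non-inlined objects the namespace unlock is handed
// to the reader as a cleanup function, so callers must Close the reader to
// release the lock.
//
//	gr, err := z.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{})
//	if err != nil {
//		return err
//	}
//	defer gr.Close()
//	_, err = io.Copy(w, gr)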
					
						
// getLatestObjectInfoWithIdx returns the objectInfo of the latest object from multiple pools
// (this function is present in case there were duplicate writes to both pools). It also returns
// the index of the pool where the latest object exists, which is used to start the GetObject stream.
func (z *erasureServerPools) getLatestObjectInfoWithIdx(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, int, error) {
	object = encodeDirObject(object)
	results := make([]struct {
		zIdx int
		oi   ObjectInfo
		err  error
	}, len(z.serverPools))
	var wg sync.WaitGroup
	for i, pool := range z.serverPools {
		wg.Add(1)
		go func(i int, pool *erasureSets) {
			defer wg.Done()
			results[i].zIdx = i
			results[i].oi, results[i].err = pool.GetObjectInfo(ctx, bucket, object, opts)
		}(i, pool)
	}
	wg.Wait()

	// Sort the objInfos such that we always serve the latest.
	// This is a defensive change to handle any duplicate
	// content that may have been created; we always serve
	// the latest object.
	sort.Slice(results, func(i, j int) bool {
		a, b := results[i], results[j]
		if a.oi.ModTime.Equal(b.oi.ModTime) {
			// On tiebreak, select the lowest pool index.
			return a.zIdx < b.zIdx
		}
		return a.oi.ModTime.After(b.oi.ModTime)
	})

	for _, res := range results {
		err := res.err
		if err == nil {
			return res.oi, res.zIdx, nil
		}
		if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
			// Some errors such as MethodNotAllowed for delete marker
			// should be returned upwards.
			return res.oi, res.zIdx, err
		}
		// When it's a delete marker and versionID is empty
		// we should simply return the error right away.
		if res.oi.DeleteMarker && opts.VersionID == "" {
			return res.oi, res.zIdx, err
		}
	}

	object = decodeDirObject(object)
	if opts.VersionID != "" {
		return ObjectInfo{}, -1, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
	}
	return ObjectInfo{}, -1, ObjectNotFound{Bucket: bucket, Object: object}
}
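// Tiebreak example: if pool 0 and pool 2 both report the same ModTime for an
// object (a duplicate write), the sort above keeps pool 0 first, so duplicates
// are consistently served from the lowest pool index.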
					
						
func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return objInfo, err
	}

	object = encodeDirObject(object)

	if z.SinglePool() {
		return z.serverPools[0].GetObjectInfo(ctx, bucket, object, opts)
	}

	if !opts.NoLock {
		opts.NoLock = true // avoid taking locks at lower levels for multi-pool setups.

		// Lock the object before reading.
		lk := z.NewNSLock(bucket, object)
		lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.RUnlock(lkctx)
	}

	objInfo, _, err = z.getLatestObjectInfoWithIdx(ctx, bucket, object, opts)
	return objInfo, err
}
					
						
// PutObject - writes an object to the least used erasure pool.
func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) {
	// Validate put object input args.
	if err := checkPutObjectArgs(ctx, bucket, object, z); err != nil {
		return ObjectInfo{}, err
	}

	object = encodeDirObject(object)

	if z.SinglePool() {
		if !isMinioMetaBucketName(bucket) {
			avail, err := hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), data.Size())
			if err != nil {
				logger.LogOnceIf(ctx, err, "erasure-write-quorum")
				return ObjectInfo{}, toObjectErr(errErasureWriteQuorum)
			}
			if !avail {
				return ObjectInfo{}, toObjectErr(errDiskFull)
			}
		}
		return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
	}
	if !opts.NoLock {
		ns := z.NewNSLock(bucket, object)
		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer ns.Unlock(lkctx)
		opts.NoLock = true
	}

	idx, err := z.getPoolIdxNoLock(ctx, bucket, object, data.Size())
	if err != nil {
		return ObjectInfo{}, err
	}

	// Overwrite the object at the right pool.
	return z.serverPools[idx].PutObject(ctx, bucket, object, data, opts)
}
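// Note on the single-pool fast path above: the free-space check runs against
// the disks of the set the object hashes to, before any data is written, so a
// full pool surfaces errDiskFull to the caller up front rather than part-way
// through the upload.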
					
						
func (z *erasureServerPools) deletePrefix(ctx context.Context, bucket string, prefix string) error {
	for _, pool := range z.serverPools {
		if _, err := pool.DeleteObject(ctx, bucket, prefix, ObjectOptions{DeletePrefix: true}); err != nil {
			return err
		}
	}
	return nil
}
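// Prefix deletes are routed through DeleteObject with the DeletePrefix option
// set (a minimal sketch; "mybucket" and "prefix/" are placeholders):
//
//	_, err := z.DeleteObject(ctx, "mybucket", "prefix/", ObjectOptions{DeletePrefix: true})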
					
						
func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if err = checkDelObjArgs(ctx, bucket, object); err != nil {
		return objInfo, err
	}

	if opts.DeletePrefix {
		err := z.deletePrefix(ctx, bucket, object)
		return ObjectInfo{}, err
	}

	object = encodeDirObject(object)

	// Acquire a write lock before deleting the object.
	lk := z.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx)

	gopts := opts
	gopts.NoLock = true
	pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, gopts)
	if err != nil {
		if _, ok := err.(InsufficientReadQuorum); ok {
			return objInfo, InsufficientWriteQuorum{}
		}
		return objInfo, err
	}

	// Delete marker already present; we are not going to create new delete markers.
	if pinfo.ObjInfo.DeleteMarker && opts.VersionID == "" {
		pinfo.ObjInfo.Name = decodeDirObject(object)
		return pinfo.ObjInfo, nil
	}

	objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts)
	objInfo.Name = decodeDirObject(object)
	return objInfo, err
}
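// Example of the quorum translation above: if resolving the object's pool
// fails with InsufficientReadQuorum, the caller sees InsufficientWriteQuorum,
// since the operation being attempted is a write (a delete).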
					
						
func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	derrs := make([]error, len(objects))
	dobjects := make([]DeletedObject, len(objects))
	objSets := set.NewStringSet()
	for i := range derrs {
		objects[i].ObjectName = encodeDirObject(objects[i].ObjectName)

		derrs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName)
		objSets.Add(objects[i].ObjectName)
	}

	// Acquire a bulk write lock across 'objects'
	multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
	lkctx, err := multiDeleteLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		for i := range derrs {
			derrs[i] = err
		}
		return dobjects, derrs
	}
	ctx = lkctx.Context()
	defer multiDeleteLock.Unlock(lkctx)

	// Fetch the location of the objects concurrently.
	poolObjIdxMap := map[int][]ObjectToDelete{}
	origIndexMap := map[int][]int{}

	// Use 1/10th of the number of objects as the concurrency level.
	concurrent := len(objects) / 10
	if concurrent <= 10 {
		// If 1/10th is too small, use one goroutine
		// per object instead.
		concurrent = len(objects)
	}

	var mu sync.Mutex
	eg := errgroup.WithNErrs(len(objects)).WithConcurrency(concurrent)
	for j, obj := range objects {
		j := j
		obj := obj
		eg.Go(func() error {
			pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, obj.ObjectName, ObjectOptions{
				NoLock: true,
			})
			if err != nil {
				if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
					derrs[j] = err
				}
				dobjects[j] = DeletedObject{
					ObjectName: decodeDirObject(obj.ObjectName),
					VersionID:  obj.VersionID,
				}
				return nil
			}

			// Delete marker already present; we are not going to create new delete markers.
			if pinfo.ObjInfo.DeleteMarker && obj.VersionID == "" {
				dobjects[j] = DeletedObject{
					DeleteMarker:          pinfo.ObjInfo.DeleteMarker,
					DeleteMarkerVersionID: pinfo.ObjInfo.VersionID,
					DeleteMarkerMTime:     DeleteMarkerMTime{pinfo.ObjInfo.ModTime},
					ObjectName:            decodeDirObject(pinfo.ObjInfo.Name),
				}
				return nil
			}

			idx := pinfo.Index

			mu.Lock()
			defer mu.Unlock()

			poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj)
			origIndexMap[idx] = append(origIndexMap[idx], j)
			return nil
		}, j)
	}

	eg.Wait() // wait to check all the pools.

	if len(poolObjIdxMap) > 0 {
		// Delete concurrently in all server pools.
		var wg sync.WaitGroup
		wg.Add(len(z.serverPools))
		for idx, pool := range z.serverPools {
			go func(idx int, pool *erasureSets) {
				defer wg.Done()
				objs := poolObjIdxMap[idx]
				if len(objs) > 0 {
					orgIndexes := origIndexMap[idx]
					deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts)
					mu.Lock()
					for i, derr := range errs {
						if derr != nil {
							derrs[orgIndexes[i]] = derr
						}
						deletedObjects[i].ObjectName = decodeDirObject(deletedObjects[i].ObjectName)
						dobjects[orgIndexes[i]] = deletedObjects[i]
					}
					mu.Unlock()
				}
			}(idx, pool)
		}
		wg.Wait()
	}

	return dobjects, derrs
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
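// CopyObject - copies an object from a source to a destination across the
// server pools, performing a metadata-only or version-reference update in
// place when possible and falling back to a full PutObject otherwise.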
func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	srcObject = encodeDirObject(srcObject)
	dstObject = encodeDirObject(dstObject)

	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	if !dstOpts.NoLock {
		ns := z.NewNSLock(dstBucket, dstObject)
		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer ns.Unlock(lkctx)
		dstOpts.NoLock = true
	}

	poolIdx, err := z.getPoolIdxNoLock(ctx, dstBucket, dstObject, srcInfo.Size)
	if err != nil {
		return objInfo, err
	}

	if cpSrcDstSame && srcInfo.metadataOnly {
		// Version ID is set for the destination and source == destination version ID.
		if dstOpts.VersionID != "" && srcOpts.VersionID == dstOpts.VersionID {
			return z.serverPools[poolIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}

		// Destination is not versioned and source version ID is empty,
		// perform an in-place update.
		if !dstOpts.Versioned && srcOpts.VersionID == "" {
			return z.serverPools[poolIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}

		// Destination is versioned, source is not destination version,
		// as a special case look for if the source object is not legacy
		// from older format, for older format we will rewrite them as
		// newer using PutObject() - this is an optimization to save space
		if dstOpts.Versioned && srcOpts.VersionID != dstOpts.VersionID && !srcInfo.Legacy {
			// CopyObject optimization where we don't create an entire copy
			// of the content, instead we add a reference.
			srcInfo.versionOnly = true
			return z.serverPools[poolIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}
	}

	putOpts := ObjectOptions{
		ServerSideEncryption: dstOpts.ServerSideEncryption,
		UserDefined:          srcInfo.UserDefined,
		Versioned:            dstOpts.Versioned,
		VersionID:            dstOpts.VersionID,
		MTime:                dstOpts.MTime,
		NoLock:               true,
	}

	return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}

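// ListObjectsV2 - implements the S3 ListObjectsV2 API on top of ListObjects,
// mapping the continuation-token/start-after semantics onto a plain marker.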
func (z *erasureServerPools) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) {
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}

	loi, err := z.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return ListObjectsV2Info{}, err
	}

	listObjectsV2Info := ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}
	return listObjectsV2Info, err
}

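// ListObjectVersions - lists objects and their versions (including delete
// markers) under a prefix, merging the listing across all server pools.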
func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
	loi := ListObjectVersionsInfo{}
	if marker == "" && versionMarker != "" {
		return loi, NotImplemented{}
	}

	opts := listPathOptions{
		Bucket:      bucket,
		Prefix:      prefix,
		Separator:   delimiter,
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: true,
		AskDisks:    globalAPIConfig.getListQuorum(),
		Versioned:   true,
	}

	// Shortcut for APN/1.0 Veeam/1.0 Backup/10.0
	// It requests unique blocks with a specific prefix.
	// We skip scanning the parent directory for
	// more objects matching the prefix.
	ri := logger.GetReqInfo(ctx)
	if ri != nil && strings.Contains(ri.UserAgent, `1.0 Veeam/1.0 Backup`) && strings.HasSuffix(prefix, ".blk") {
		opts.BaseDir = prefix
		opts.Transient = true
	}

	// set bucket metadata in opts
	opts.setBucketMeta(ctx)

	merged, err := z.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		return loi, err
	}
	defer merged.truncate(0) // Release when returning
	if versionMarker == "" {
		o := listPathOptions{Marker: marker}
		// If we are not looking for a specific version skip it.

		o.parseMarker()
		merged.forwardPast(o.Marker)
	}
	objects := merged.fileInfoVersions(bucket, prefix, delimiter, versionMarker)
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Only add each once.
			// With slash delimiter we only get the directory once.
			found := false
			if delimiter != slashSeparator {
				for _, p := range loi.Prefixes {
					if found {
						break
					}
					found = p == obj.Name
				}
			}
			if !found {
				loi.Prefixes = append(loi.Prefixes, obj.Name)
			}
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
		loi.NextVersionIDMarker = last.VersionID
	}
	return loi, nil
}

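// maxKeysPlusOne - bounds maxKeys to maxObjectList and optionally requests
// one extra entry, used when a listing resumes from a marker.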
func maxKeysPlusOne(maxKeys int, addOne bool) int {
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}
	if addOne {
		maxKeys++
	}
	return maxKeys
}

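// ListObjects - lists objects under a prefix across all server pools,
// honoring the marker, delimiter and maxKeys semantics of the S3 API.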
func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	var loi ListObjectsInfo
	opts := listPathOptions{
		Bucket:      bucket,
		Prefix:      prefix,
		Separator:   delimiter,
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: false,
		AskDisks:    globalAPIConfig.getListQuorum(),
	}
	opts.setBucketMeta(ctx)

	if len(prefix) > 0 && maxKeys == 1 && marker == "" {
		// Optimization for certain applications like
		// - Cohesity
		// - Actifio, Splunk etc.
		// which send ListObjects requests where the actual object
		// itself is the prefix and max-keys=1 in such scenarios
		// we can simply verify locally if such an object exists
		// to avoid the need for ListObjects().
		objInfo, err := z.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
		if err == nil {
			if opts.Lifecycle != nil {
				evt := evalActionFromLifecycle(ctx, *opts.Lifecycle, opts.Retention, objInfo)
				if evt.Action.Delete() {
					globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_s3ListObjects)
					if !evt.Action.DeleteRestored() {
						// Skip entry if ILM action was DeleteVersionAction or DeleteAction
						return loi, nil
					}
				}
			}
			loi.Objects = append(loi.Objects, objInfo)
			return loi, nil
		}
	}

	merged, err := z.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		if !isErrBucketNotFound(err) {
			logger.LogOnceIf(ctx, err, "erasure-list-objects-path"+bucket)
		}
		return loi, err
	}

	merged.forwardPast(opts.Marker)
	defer merged.truncate(0) // Release when returning

	// Default is recursive; if a delimiter is set then list non-recursively.
	objects := merged.fileInfos(bucket, prefix, delimiter)
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Only add each once.
			// With slash delimiter we only get the directory once.
			found := false
			if delimiter != slashSeparator {
				for _, p := range loi.Prefixes {
					if found {
						break
					}
					found = p == obj.Name
				}
			}
			if !found {
				loi.Prefixes = append(loi.Prefixes, obj.Name)
			}
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
	}
	return loi, nil
}

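// ListMultipartUploads - lists in-progress multipart uploads for the given
// bucket and prefix across all server pools.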
func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
	if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil {
		return ListMultipartsInfo{}, err
	}

	if z.SinglePool() {
		return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	}

	poolResult := ListMultipartsInfo{}
	poolResult.MaxUploads = maxUploads
	poolResult.KeyMarker = keyMarker
	poolResult.Prefix = prefix
	poolResult.Delimiter = delimiter
	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		result, err := pool.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker,
			delimiter, maxUploads)
		if err != nil {
			return result, err
		}
		poolResult.Uploads = append(poolResult.Uploads, result.Uploads...)
	}
	return poolResult, nil
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
	if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil {
		return nil, err
	}

	if z.SinglePool() {
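		// Single-pool setup: verify the object's erasure set has usable
		// space before initiating the upload.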
		if !isMinioMetaBucketName(bucket) {
			avail, err := hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), -1)
			if err != nil {
				logger.LogIf(ctx, err)
				return nil, toObjectErr(errErasureWriteQuorum)
			}
			if !avail {
				return nil, toObjectErr(errDiskFull)
			}
		}
		return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
	}

	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) || z.IsPoolRebalancing(idx) {
			continue
		}

		result, err := pool.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
		if err != nil {
			return nil, err
		}
		// If there is a multipart upload with the same bucket/object name,
		// create the new multipart in the same pool; this will avoid
		// creating two multipart uploads in two different pools.
		if len(result.Uploads) != 0 {
			return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)
		}
	}

	// any parallel writes on the object will block for this poolIdx
	// to return since this holds a read lock on the namespace.
	idx, err := z.getPoolIdx(ctx, bucket, object, -1)
	if err != nil {
		return nil, err
	}

	return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)
}

// Copies a part of an object from source hashedSet to destination hashedSet.
func (z *erasureServerPools) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error) {
	if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, z); err != nil {
		return PartInfo{}, err
	}

	return z.PutObjectPart(ctx, destBucket, destObject, uploadID, partID,
		srcInfo.PutObjReader, dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (PartInfo, error) {
	if err := checkPutObjectPartArgs(ctx, bucket, object, z); err != nil {
		return PartInfo{}, err
	}

	if z.SinglePool() {
		return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
	}

	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		pi, err := pool.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
		if err == nil {
			return pi, nil
		}
		if _, ok := err.(InvalidUploadID); ok {
			// Look for information on the next pool
			continue
		}
		// Any other unhandled errors such as quorum return.
		return PartInfo{}, err
	}

	return PartInfo{}, InvalidUploadID{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}
}

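// GetMultipartInfo - returns metadata of an in-progress multipart upload,
// probing each pool until the upload ID is found.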
func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
	if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
		return MultipartInfo{}, err
	}

	if z.SinglePool() {
		return z.serverPools[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts)
	}
	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		mi, err := pool.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
		if err == nil {
			return mi, nil
		}
		if _, ok := err.(InvalidUploadID); ok {
			// upload id not found, continue to the next pool.
			continue
		}
		// any other unhandled error return right here.
		return MultipartInfo{}, err
	}
	return MultipartInfo{}, InvalidUploadID{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (ListPartsInfo, error) {
	if err := checkListPartsArgs(ctx, bucket, object, z); err != nil {
		return ListPartsInfo{}, err
	}

	if z.SinglePool() {
		return z.serverPools[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
	}
	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		result, err := pool.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
		if err == nil {
			return result, nil
		}
		if _, ok := err.(InvalidUploadID); ok {
			continue
		}
		return ListPartsInfo{}, err
	}
	return ListPartsInfo{}, InvalidUploadID{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
	if err := checkAbortMultipartArgs(ctx, bucket, object, z); err != nil {
		return err
	}

	if z.SinglePool() {
		return z.serverPools[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
	}

	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		err := pool.AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
		if err == nil {
			return nil
		}
		if _, ok := err.(InvalidUploadID); ok {
			// upload id not found, move to next pool
			continue
		}
		return err
	}
	return InvalidUploadID{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil {
		return objInfo, err
	}

	if z.SinglePool() {
		return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
	}

	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		objInfo, err = pool.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
		if err == nil {
			return objInfo, nil
		}
		if _, ok := err.(InvalidUploadID); ok {
			// upload id not found, move to next pool
			continue
		}
		return objInfo, err
	}

	return objInfo, InvalidUploadID{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}
}

// GetBucketInfo - returns bucket info from one of the erasure coded serverPools.
func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bucketInfo BucketInfo, err error) {
	bucketInfo, err = z.s3Peer.GetBucketInfo(ctx, bucket, opts)
	if err != nil {
		return bucketInfo, toObjectErr(err, bucket)
	}

	meta, err := globalBucketMetadataSys.Get(bucket)
	if err == nil {
		bucketInfo.Created = meta.Created
		bucketInfo.Versioning = meta.Versioning()
		bucketInfo.ObjectLocking = meta.ObjectLocking()
	}
	return bucketInfo, nil
}

// DeleteBucket - deletes a bucket on all serverPools simultaneously,
// even if one of the serverPools fails to delete the bucket, we proceed to
// undo a successful operation.
func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
	if isMinioMetaBucketName(bucket) {
		return BucketNameInvalid{Bucket: bucket}
	}

	// Verify if bucket is valid.
	if err := s3utils.CheckValidBucketName(bucket); err != nil {
		return BucketNameInvalid{Bucket: bucket}
	}

	if !opts.NoLock {
		// Lock the bucket name before deleting.
		lk := z.NewNSLock(minioMetaTmpBucket, bucket+".lck")
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx)
	}

	err := z.s3Peer.DeleteBucket(ctx, bucket, opts)
	if err == nil || isErrBucketNotFound(err) {
		// If site replication is configured, hold on to deleted bucket state until sites sync
		if opts.SRDeleteOp == MarkDelete {
			z.s3Peer.MakeBucket(context.Background(), pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket), MakeBucketOptions{})
		}
	}

										 |  |  | 	if err != nil && !isErrBucketNotFound(err) { | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 		if !opts.NoRecreate { | 
					
						
							|  |  |  | 			z.s3Peer.MakeBucket(ctx, bucket, MakeBucketOptions{}) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if err == nil { | 
					
						
							|  |  |  | 		// Purge the entire bucket metadata entirely.
 | 
					
						
							|  |  |  | 		z.deleteAll(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, bucket)) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return toObjectErr(err, bucket) | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
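// A note on the DeleteBucket flow above: a successful peer delete (or an
// already-missing bucket) may leave a MarkDelete tombstone under .minio.sys so
// replicated sites can converge, any other failure re-creates the bucket unless
// NoRecreate is set, and only a clean delete purges the bucket metadata. An
// illustrative call that skips the undo step could look like this (hypothetical
// caller, not from this file):
//
//	err := z.DeleteBucket(ctx, "example-bucket", DeleteBucketOptions{NoRecreate: true})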
									
										
										
										
// deleteAll will rename bucket+prefix unconditionally across all disks to
// minioMetaTmpDeletedBucket + unique uuid.
// Note that set distribution is ignored so it should only be used in cases where
// data is not distributed across sets. Errors are logged but individual
// disk failures are not returned.
func (z *erasureServerPools) deleteAll(ctx context.Context, bucket, prefix string) {
	for _, servers := range z.serverPools {
		for _, set := range servers.sets {
			set.deleteAll(ctx, bucket, prefix)
		}
	}
}

// List all buckets from one of the serverPools; we are not doing a merge
// sort here, just for simplification. As per design it is assumed
// that all buckets are present on all serverPools.
func (z *erasureServerPools) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
	buckets, err = z.s3Peer.ListBuckets(ctx, opts)
	if err != nil {
		return nil, err
	}
	for i := range buckets {
		createdAt, err := globalBucketMetadataSys.CreatedAt(buckets[i].Name)
		if err == nil {
			buckets[i].Created = createdAt
		}
	}
	return buckets, nil
}

func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
	// Acquire lock on format.json
	formatLock := z.NewNSLock(minioMetaBucket, formatConfigFile)
	lkctx, err := formatLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return madmin.HealResultItem{}, err
	}
	ctx = lkctx.Context()
	defer formatLock.Unlock(lkctx)

	r := madmin.HealResultItem{
		Type:   madmin.HealItemMetadata,
		Detail: "disk-format",
	}

	var countNoHeal int
	for _, pool := range z.serverPools {
		result, err := pool.HealFormat(ctx, dryRun)
		if err != nil && !errors.Is(err, errNoHealRequired) {
			logger.LogOnceIf(ctx, err, "erasure-heal-format")
			continue
		}
		// Count errNoHealRequired across all serverPools,
		// to return the appropriate error to the caller
		if errors.Is(err, errNoHealRequired) {
			countNoHeal++
		}
		r.DiskCount += result.DiskCount
		r.SetCount += result.SetCount
		r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
		r.After.Drives = append(r.After.Drives, result.After.Drives...)
	}

	// No heal required by any of the serverPools, return errNoHealRequired
	if countNoHeal == len(z.serverPools) {
		return r, errNoHealRequired
	}

	return r, nil
}

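// A note on HealFormat above: errNoHealRequired from a single pool is not a
// failure, it is only counted; HealFormat reports errNoHealRequired to the
// caller only when every pool returns it, so one pool needing a format heal is
// enough to surface aggregated heal results instead.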
									
										
										
										
func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	r := madmin.HealResultItem{
		Type:   madmin.HealItemBucket,
		Bucket: bucket,
	}

	// Attempt heal on the bucket metadata, ignore any failures
	hopts := opts
	hopts.Recreate = false
	defer z.HealObject(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, bucket, bucketMetadataFile), "", hopts)

	for _, pool := range z.serverPools {
		result, err := pool.HealBucket(ctx, bucket, opts)
		if err != nil {
			if _, ok := err.(BucketNotFound); ok {
				continue
			}
			return result, err
		}
		r.DiskCount += result.DiskCount
		r.SetCount += result.SetCount
		r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
		r.After.Drives = append(r.After.Drives, result.After.Drives...)
	}

	return r, nil
}

// Walk a bucket, optionally a prefix, recursively, until we have returned
// all the content to the ObjectInfo channel; it is the caller's responsibility
// to allocate a receive channel for ObjectInfo. Upon any unhandled error the
// walker returns the error, and if context.Done() is received then Walk()
// stops the walker.
func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	vcfg, _ := globalBucketVersioningSys.Get(bucket)

	ctx, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		defer close(results)

		for _, erasureSet := range z.serverPools {
			var wg sync.WaitGroup
			for _, set := range erasureSet.sets {
				set := set
				wg.Add(1)
				go func() {
					defer wg.Done()

					disks, _ := set.getOnlineDisksWithHealing()
					if len(disks) == 0 {
						cancel()
						return
					}

					loadEntry := func(entry metaCacheEntry) {
						if entry.isDir() {
							return
						}

						fivs, err := entry.fileInfoVersions(bucket)
						if err != nil {
							cancel()
							return
						}

						versionsSorter(fivs.Versions).reverse()

						for _, version := range fivs.Versions {
							send := true
							if opts.WalkFilter != nil && !opts.WalkFilter(version) {
								send = false
							}

							if !send {
								continue
							}

							versioned := vcfg != nil && vcfg.Versioned(version.Name)
							objInfo := version.ToObjectInfo(bucket, version.Name, versioned)

							select {
							case <-ctx.Done():
								return
							case results <- objInfo:
							}
						}
					}

					// How to resolve partial results.
					resolver := metadataResolutionParams{
						dirQuorum: 1,
						objQuorum: 1,
						bucket:    bucket,
					}

					path := baseDirFromPrefix(prefix)
					filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator)
					if path == prefix {
						filterPrefix = ""
					}

					lopts := listPathRawOptions{
						disks:          disks,
						bucket:         bucket,
						path:           path,
						filterPrefix:   filterPrefix,
						recursive:      true,
						forwardTo:      opts.WalkMarker,
						minDisks:       1,
						reportNotFound: false,
						agreed:         loadEntry,
						partial: func(entries metaCacheEntries, _ []error) {
							entry, ok := entries.resolve(&resolver)
							if !ok {
								// check if we can get at least one entry
								// and proceed nonetheless.
								entry, _ = entries.firstFound()
							}

							loadEntry(*entry)
						},
						finished: nil,
					}

					if err := listPathRaw(ctx, lopts); err != nil {
						logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts))
						cancel()
						return
					}
				}()
			}
			wg.Wait()
		}
	}()

	return nil
}

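// Illustrative use of Walk above (a hypothetical caller, not part of this
// file): the caller allocates the results channel, Walk returns immediately,
// and the walker goroutine closes the channel once listing completes or fails.
//
//	results := make(chan ObjectInfo, 100)
//	if err := z.Walk(ctx, "example-bucket", "prefix/", results, ObjectOptions{}); err != nil {
//		return err
//	}
//	for oi := range results {
//		fmt.Println(oi.Name, oi.VersionID)
//	}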
									
										
										
										
// HealObjectFn closure function that heals the object.
type HealObjectFn func(bucket, object, versionID string) error

func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObjectFn HealObjectFn) error {
	healEntry := func(bucket string, entry metaCacheEntry) error {
		if entry.isDir() {
			return nil
		}
		// We might land at .metacache, .trash, .multipart;
		// there is no need to heal them, so skip them, but
		// only when the bucket is '.minio.sys'
		if bucket == minioMetaBucket {
			if wildcard.Match("buckets/*/.metacache/*", entry.name) {
				return nil
			}
			if wildcard.Match("tmp/*", entry.name) {
				return nil
			}
			if wildcard.Match("multipart/*", entry.name) {
				return nil
			}
			if wildcard.Match("tmp-old/*", entry.name) {
				return nil
			}
		}
		fivs, err := entry.fileInfoVersions(bucket)
		if err != nil {
			return healObjectFn(bucket, entry.name, "")
		}
		if opts.Remove && !opts.DryRun {
			err := z.CheckAbandonedParts(ctx, bucket, entry.name, opts)
			if err != nil {
				logger.LogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err))
			}
		}
		for _, version := range fivs.Versions {
			err := healObjectFn(bucket, version.Name, version.VersionID)
			if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
				return err
			}
		}

		return nil
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var poolErrs [][]error
	for idx, erasureSet := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		errs := make([]error, len(erasureSet.sets))
		var wg sync.WaitGroup
		for idx, set := range erasureSet.sets {
			wg.Add(1)
			go func(idx int, set *erasureObjects) {
				defer wg.Done()

				errs[idx] = set.listAndHeal(bucket, prefix, healEntry)
			}(idx, set)
		}
		wg.Wait()
		poolErrs = append(poolErrs, errs)
	}
	for _, errs := range poolErrs {
		for _, err := range errs {
			if err == nil {
				continue
			}
			return err
		}
	}
	return nil
}

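// Illustrative HealObjectFn for HealObjects above, a sketch of the closure a
// caller would supply (hypothetical, the real callers live elsewhere in this
// package):
//
//	healFn := func(bucket, object, versionID string) error {
//		_, err := z.HealObject(ctx, bucket, object, versionID, madmin.HealOpts{})
//		return err
//	}
//	err := z.HealObjects(ctx, "example-bucket", "prefix/", madmin.HealOpts{}, healFn)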
									
										
										
										
func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	object = encodeDirObject(object)

	errs := make([]error, len(z.serverPools))
	results := make([]madmin.HealResultItem, len(z.serverPools))
	var wg sync.WaitGroup
	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		wg.Add(1)
		go func(idx int, pool *erasureSets) {
			defer wg.Done()
			result, err := pool.HealObject(ctx, bucket, object, versionID, opts)
			result.Object = decodeDirObject(result.Object)
			errs[idx] = err
			results[idx] = result
		}(idx, pool)
	}
	wg.Wait()

	// Return the first nil error
	for idx, err := range errs {
		if err == nil {
			return results[idx], nil
		}
	}

	// No pool returned a nil error, return the first non 'not found' error
	for idx, err := range errs {
		if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
			return results[idx], err
		}
	}

	// At this stage, all errors are 'not found'
	if versionID != "" {
		return madmin.HealResultItem{}, VersionNotFound{
			Bucket:    bucket,
			Object:    object,
			VersionID: versionID,
		}
	}
	return madmin.HealResultItem{}, ObjectNotFound{
		Bucket: bucket,
		Object: object,
	}
}

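// A note on HealObject above: pools are healed concurrently and their results
// are resolved in three steps: the first pool that healed without error wins,
// otherwise the first error that is not a 'not found' error is returned, and
// only when every pool reported 'not found' does the caller get
// VersionNotFound or ObjectNotFound.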
									
										
										
										
func (z *erasureServerPools) getPoolAndSet(id string) (poolIdx, setIdx, diskIdx int, err error) {
	for poolIdx := range z.serverPools {
		format := z.serverPools[poolIdx].format
		for setIdx, set := range format.Erasure.Sets {
			for i, diskID := range set {
				if diskID == id {
					return poolIdx, setIdx, i, nil
				}
			}
		}
	}
	return -1, -1, -1, fmt.Errorf("DriveID(%s) %w", id, errDiskNotFound)
}

const (
	vmware = "VMWare"
)

// HealthOptions takes input options to return specific information
type HealthOptions struct {
	Maintenance    bool
	DeploymentType string
}

// HealthResult returns the current state of the system, along with
// any specific heuristic information that was queried
type HealthResult struct {
	Healthy       bool
	HealingDrives int
	PoolID, SetID int
	WriteQuorum   int
	UsingDefaults bool
}

// ReadHealth returns whether the cluster can serve read requests
func (z *erasureServerPools) ReadHealth(ctx context.Context) bool {
	erasureSetUpCount := make([][]int, len(z.serverPools))
	for i := range z.serverPools {
		erasureSetUpCount[i] = make([]int, len(z.serverPools[i].sets))
	}

	diskIDs := globalNotificationSys.GetLocalDiskIDs(ctx)
	diskIDs = append(diskIDs, getLocalDiskIDs(z))

	for _, localDiskIDs := range diskIDs {
		for _, id := range localDiskIDs {
			poolIdx, setIdx, _, err := z.getPoolAndSet(id)
			if err != nil {
				logger.LogIf(ctx, err)
				continue
			}
			erasureSetUpCount[poolIdx][setIdx]++
		}
	}

	b := z.BackendInfo()
	poolReadQuorums := make([]int, len(b.StandardSCData))
	copy(poolReadQuorums, b.StandardSCData)

	for poolIdx := range erasureSetUpCount {
		for setIdx := range erasureSetUpCount[poolIdx] {
			if erasureSetUpCount[poolIdx][setIdx] < poolReadQuorums[poolIdx] {
				return false
			}
		}
	}
	return true
}

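// A note on ReadHealth above: the per-pool read quorum is simply the number of
// data drives configured for the standard storage class (StandardSCData), so a
// set counts as readable while at least that many of its drives report in.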
									
										
										
										
// Health - returns the current status of the object layer health, i.e. whether
// write access exists across sets; additionally it can be used to query whether
// health would be lost if this node were taken down by an external orchestrator.
func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) HealthResult {
	erasureSetUpCount := make([][]int, len(z.serverPools))
	for i := range z.serverPools {
		erasureSetUpCount[i] = make([]int, len(z.serverPools[i].sets))
	}

	diskIDs := globalNotificationSys.GetLocalDiskIDs(ctx)
	if !opts.Maintenance {
		diskIDs = append(diskIDs, getLocalDiskIDs(z))
	}

	for _, localDiskIDs := range diskIDs {
		for _, id := range localDiskIDs {
			poolIdx, setIdx, _, err := z.getPoolAndSet(id)
			if err != nil {
				logger.LogIf(ctx, err)
				continue
			}
			erasureSetUpCount[poolIdx][setIdx]++
		}
	}

	reqInfo := (&logger.ReqInfo{}).AppendTags("maintenance", strconv.FormatBool(opts.Maintenance))

	b := z.BackendInfo()
	poolWriteQuorums := make([]int, len(b.StandardSCData))
	for i, data := range b.StandardSCData {
		poolWriteQuorums[i] = data
		if data == b.StandardSCParity {
			poolWriteQuorums[i] = data + 1
		}
	}

	var aggHealStateResult madmin.BgHealState
	// Check if disks are healing, in case of VMware vSphere deployments.
	if opts.Maintenance && opts.DeploymentType == vmware {
		// Check if local disks are being healed; if they are being healed
		// we need to report the healthy status as 'false' so that this server
		// is not taken down for maintenance.
		var err error
		aggHealStateResult, err = getAggregatedBackgroundHealState(ctx, nil)
		if err != nil {
			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Unable to verify global heal status: %w", err))
			return HealthResult{
				Healthy: false,
			}
		}

		if len(aggHealStateResult.HealDisks) > 0 {
			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", len(aggHealStateResult.HealDisks)))
		}
	}

	var usingDefaults bool
	if globalStorageClass.GetParityForSC(storageclass.STANDARD) < 0 {
		usingDefaults = true
	}

	for poolIdx := range erasureSetUpCount {
		for setIdx := range erasureSetUpCount[poolIdx] {
			if erasureSetUpCount[poolIdx][setIdx] < poolWriteQuorums[poolIdx] {
				logger.LogIf(logger.SetReqInfo(ctx, reqInfo),
					fmt.Errorf("Write quorum may be lost on pool: %d, set: %d, expected write quorum: %d",
						poolIdx, setIdx, poolWriteQuorums[poolIdx]))
				return HealthResult{
					Healthy:       false,
					HealingDrives: len(aggHealStateResult.HealDisks),
					PoolID:        poolIdx,
					SetID:         setIdx,
					WriteQuorum:   poolWriteQuorums[poolIdx],
					UsingDefaults: usingDefaults, // indicates if config was not initialized and we are using defaults on this node.
				}
			}
		}
	}

	var maximumWriteQuorum int
	for _, writeQuorum := range poolWriteQuorums {
		if maximumWriteQuorum == 0 {
			maximumWriteQuorum = writeQuorum
		}
		if writeQuorum > maximumWriteQuorum {
			maximumWriteQuorum = writeQuorum
		}
	}

	// When maintenance is not specified we don't have
	// to look at the healing side of the code.
	if !opts.Maintenance {
		return HealthResult{
			Healthy:       true,
			WriteQuorum:   maximumWriteQuorum,
			UsingDefaults: usingDefaults, // indicates if config was not initialized and we are using defaults on this node.
		}
	}

	return HealthResult{
		Healthy:       len(aggHealStateResult.HealDisks) == 0,
		HealingDrives: len(aggHealStateResult.HealDisks),
		WriteQuorum:   maximumWriteQuorum,
		UsingDefaults: usingDefaults, // indicates if config was not initialized and we are using defaults on this node.
	}
}

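// A worked example for the write quorum computed in Health above: with
// StandardSCData = 8 and StandardSCParity = 4 the write quorum is 8, while with
// 4 data and 4 parity drives (data == parity) it becomes data+1 = 5, so writes
// are never accepted by exactly half of a set. An orchestrator draining a node
// might check (illustrative call, not from this file):
//
//	result := z.Health(ctx, HealthOptions{Maintenance: true, DeploymentType: vmware})
//	if !result.Healthy {
//		// keep the node in service; HealingDrives and WriteQuorum explain why
//	}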
									
										
										
										
											2021-04-05 04:32:31 +08:00
										 |  |  | // PutObjectMetadata - replace or add tags to an existing object
 | 
					
						
							|  |  |  | func (z *erasureServerPools) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { | 
					
						
							|  |  |  | 	object = encodeDirObject(object) | 
					
						
							|  |  |  | 	if z.SinglePool() { | 
					
						
							|  |  |  | 		return z.serverPools[0].PutObjectMetadata(ctx, bucket, object, opts) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-06-17 22:30:53 +08:00
										 |  |  | 	opts.MetadataChg = true | 
					
						
							| 
									
										
										
										
											2021-04-05 04:32:31 +08:00
										 |  |  | 	// We don't know the size here set 1GiB atleast.
 | 
					
						
							| 
									
										
										
										
											2022-01-11 01:07:49 +08:00
										 |  |  | 	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) | 
					
						
							| 
									
										
										
										
											2021-04-05 04:32:31 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return ObjectInfo{}, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return z.serverPools[idx].PutObjectMetadata(ctx, bucket, object, opts) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
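
// poolForExistingObject is a hypothetical, unused helper, shown only to
// illustrate the routing pattern shared by the metadata and tag operations in
// this file: a single-pool setup short-circuits to pool zero, otherwise the
// pool that already holds the object is resolved via
// getPoolIdxExistingWithOpts.
func (z *erasureServerPools) poolForExistingObject(ctx context.Context, bucket, object string, opts ObjectOptions) (*erasureSets, error) {
	if z.SinglePool() {
		return z.serverPools[0], nil
	}
	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}
	return z.serverPools[idx], nil
}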
					
						
// PutObjectTags - replace or add tags to an existing object
func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].PutObjectTags(ctx, bucket, object, tags, opts)
	}

	opts.MetadataChg = true

	// We don't know the size here, set at least 1GiB.
	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return ObjectInfo{}, err
	}

	return z.serverPools[idx].PutObjectTags(ctx, bucket, object, tags, opts)
}
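
// The sketch below is illustrative only: the tags argument passed to
// PutObjectTags is the URL-encoded form "key1=value1&key2=value2". Assuming
// the tags package imported for GetObjectTags below exposes ParseObjectTags
// (as in minio-go/v7/pkg/tags), a caller could validate such a string before
// handing it to PutObjectTags. The function name exampleValidateTagString is
// hypothetical and unused.
func exampleValidateTagString(rawTags string) (string, error) {
	t, err := tags.ParseObjectTags(rawTags) // e.g. "project=alpha&owner=ops"
	if err != nil {
		return "", err
	}
	return t.String(), nil // canonical encoded form to store on the object
}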
					
						
// DeleteObjectTags - delete object tags from an existing object
func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts)
	}

	opts.MetadataChg = true

	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return ObjectInfo{}, err
	}

	return z.serverPools[idx].DeleteObjectTags(ctx, bucket, object, opts)
}

// GetObjectTags - get object tags from an existing object
func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].GetObjectTags(ctx, bucket, object, opts)
	}

	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}

	return z.serverPools[idx].GetObjectTags(ctx, bucket, object, opts)
}
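
// exampleTagRoundTrip is a hypothetical, unused sketch showing how the tag
// methods above compose: write a tag set on an object and read it back. The
// bucket, object, and tag values are assumptions for illustration only.
func exampleTagRoundTrip(ctx context.Context, z *erasureServerPools) (*tags.Tags, error) {
	if _, err := z.PutObjectTags(ctx, "example-bucket", "example-object", "tier=hot", ObjectOptions{}); err != nil {
		return nil, err
	}
	return z.GetObjectTags(ctx, "example-bucket", "example-object", ObjectOptions{})
}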
					
						
// TransitionObject - transition object content to target tier.
func (z *erasureServerPools) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].TransitionObject(ctx, bucket, object, opts)
	}

	// Avoid transitioning an object from a pool being decommissioned.
	opts.SkipDecommissioned = true
	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return err
	}

	return z.serverPools[idx].TransitionObject(ctx, bucket, object, opts)
}

// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
func (z *erasureServerPools) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].RestoreTransitionedObject(ctx, bucket, object, opts)
	}

	// Avoid restoring an object from a pool being decommissioned.
	opts.SkipDecommissioned = true
	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return err
	}

	return z.serverPools[idx].RestoreTransitionedObject(ctx, bucket, object, opts)
}
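
// Note (illustrative, not part of the original code): both tiering entry
// points above resolve the pool with SkipDecommissioned set, i.e. roughly
//
//	opts.SkipDecommissioned = true
//	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
//
// so that an object is never transitioned from, or restored onto, a pool
// that is currently being drained by a decommission.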
					
						
func (z *erasureServerPools) CheckAbandonedParts(ctx context.Context, bucket, object string, opts madmin.HealOpts) error {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].CheckAbandonedParts(ctx, bucket, object, opts)
	}
	errs := make([]error, len(z.serverPools))
	var wg sync.WaitGroup
	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		wg.Add(1)
		go func(idx int, pool *erasureSets) {
			defer wg.Done()
			err := pool.CheckAbandonedParts(ctx, bucket, object, opts)
			if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
				errs[idx] = err
			}
		}(idx, pool)
	}
	wg.Wait()
	// Return the first non-nil pool error, if any.
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}
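
// checkAbandonedPartsAllPools is a hypothetical, unused variant of
// CheckAbandonedParts above, sketched to show how all per-pool failures could
// be surfaced together with errors.Join (Go 1.20+) instead of returning only
// the first one. It assumes the same helpers used above.
func (z *erasureServerPools) checkAbandonedPartsAllPools(ctx context.Context, bucket, object string, opts madmin.HealOpts) error {
	object = encodeDirObject(object)
	errs := make([]error, len(z.serverPools))
	var wg sync.WaitGroup
	for idx, pool := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		wg.Add(1)
		go func(idx int, pool *erasureSets) {
			defer wg.Done()
			if err := pool.CheckAbandonedParts(ctx, bucket, object, opts); err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
				errs[idx] = err
			}
		}(idx, pool)
	}
	wg.Wait()
	return errors.Join(errs...) // nil when every entry is nil
}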
					
						
// DecomTieredObject - moves a tiered object to another pool during decommissioning.
func (z *erasureServerPools) DecomTieredObject(ctx context.Context, bucket, object string, fi FileInfo, opts ObjectOptions) error {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return fmt.Errorf("error decommissioning %s/%s", bucket, object)
	}
	if !opts.NoLock {
		ns := z.NewNSLock(bucket, object)
		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return err
		}
		ctx = lkctx.Context()
		defer ns.Unlock(lkctx)
		opts.NoLock = true
	}
	idx, err := z.getPoolIdxNoLock(ctx, bucket, object, fi.Size)
	if err != nil {
		return err
	}

	return z.serverPools[idx].DecomTieredObject(ctx, bucket, object, fi, opts)
}
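
// withObjectNSLock is a hypothetical, unused helper sketching the locking
// idiom used by DecomTieredObject above: take the namespace lock, continue
// with the lock-scoped context, and mark opts.NoLock so downstream calls do
// not try to lock the same object again.
func (z *erasureServerPools) withObjectNSLock(ctx context.Context, bucket, object string, opts *ObjectOptions, fn func(ctx context.Context) error) error {
	if opts.NoLock {
		return fn(ctx)
	}
	ns := z.NewNSLock(bucket, object)
	lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	defer ns.Unlock(lkctx)
	opts.NoLock = true
	return fn(lkctx.Context())
}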