// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"strconv"
	"strings"
	"sync"

	"github.com/klauspost/readahead"
	"github.com/minio/madmin-go"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/internal/bucket/lifecycle"
	"github.com/minio/minio/internal/bucket/replication"
	"github.com/minio/minio/internal/event"
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/sync/errgroup"
	"github.com/minio/pkg/mimedb"
	uatomic "go.uber.org/atomic"
)

// list all errors which can be ignored in object operations.
var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// Object Operations

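// countOnlineDisks returns the number of disks in the given slice that are
// non-nil and currently report themselves as online.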
func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
	for _, onlineDisk := range onlineDisks {
		if onlineDisk != nil && onlineDisk.IsOnline() {
			online++
		}
	}
	return online
}

// CopyObject - copies the source object to the destination object.
// If the source and destination objects are the same, only the
// metadata is updated.
func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
	// This call shouldn't be used for anything other than metadata updates or adding self-referential versions.
	if !srcInfo.metadataOnly {
		return oi, NotImplemented{}
	}

	defer NSUpdated(dstBucket, dstObject)

	if !dstOpts.NoLock {
		lk := er.NewNSLock(dstBucket, dstObject)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return oi, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}
	// Read metadata associated with the object from all disks.
	storageDisks := er.getDisks()
	metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true)

	// Get quorum for this object.
	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}
	if fi.Deleted {
		if srcOpts.VersionID == "" {
			return oi, toObjectErr(errFileNotFound, srcBucket, srcObject)
		}
		return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	versionID := srcInfo.VersionID
	if srcInfo.versionOnly {
		versionID = dstOpts.VersionID
		// Preserve the destination versionId if specified.
		if versionID == "" {
			versionID = mustGetUUID()
		}
		modTime = UTCNow()
	}
	fi.VersionID = versionID // set any new versionID we might have created
	fi.ModTime = modTime     // set modTime for the new versionID
	if !dstOpts.MTime.IsZero() {
		modTime = dstOpts.MTime
		fi.ModTime = dstOpts.MTime
	}
	fi.Metadata = srcInfo.UserDefined
	srcInfo.UserDefined["etag"] = srcInfo.ETag

	// Update `xl.meta` content on each disk.
	for index := range metaArr {
		if metaArr[index].IsValid() {
			metaArr[index].ModTime = modTime
			metaArr[index].VersionID = versionID
			metaArr[index].Metadata = srcInfo.UserDefined
		}
	}

	// Write unique `xl.meta` for each disk.
	if _, err = writeUniqueFileInfo(ctx, onlineDisks, srcBucket, srcObject, metaArr, writeQuorum); err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	// We are adding a new version to this object under the namespace lock, so this is the latest version.
	fi.IsLatest = true
	return fi.ToObjectInfo(srcBucket, srcObject), nil
}

// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	var unlockOnDefer bool
	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
		}
	}()

	// Acquire lock
	if lockType != noLock {
		lock := er.NewNSLock(bucket, object)
		switch lockType {
		case writeLock:
			lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
		case readLock:
			lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
		}
		unlockOnDefer = true
	}

	fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true)
	if err != nil {
		return nil, toObjectErr(err, bucket, object)
	}

	if !fi.DataShardFixed() {
		diskMTime := pickValidDiskTimeWithQuorum(metaArr, fi.Erasure.DataBlocks)
		if !diskMTime.Equal(timeSentinel) && !diskMTime.IsZero() {
			for index := range onlineDisks {
				if onlineDisks[index] == OfflineDisk {
					continue
				}
				if !metaArr[index].IsValid() {
					continue
				}
				if !metaArr[index].AcceptableDelta(diskMTime, shardDiskTimeDelta) {
					// If the disk mTime mismatches, it is considered outdated.
					// https://github.com/minio/minio/pull/13803
					//
					// This check is only active if we could find maximally
					// occurring disk mtimes that are roughly the same across
					// the quorum, allowing us to skip those shards which we
					// suspect are wrong.
					onlineDisks[index] = OfflineDisk
				}
			}
		}
	}

	objInfo := fi.ToObjectInfo(bucket, object)
	if objInfo.DeleteMarker {
		if opts.VersionID == "" {
			return &GetObjectReader{
				ObjInfo: objInfo,
			}, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return &GetObjectReader{
			ObjInfo: objInfo,
		}, toObjectErr(errMethodNotAllowed, bucket, object)
	}
	if objInfo.IsRemote() {
		gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, h, objInfo, opts)
		if err != nil {
			return nil, err
		}
		unlockOnDefer = false
		return gr.WithCleanupFuncs(nsUnlocker), nil
	}

	fn, off, length, err := NewGetObjectReader(rs, objInfo, opts)
	if err != nil {
		return nil, err
	}
	unlockOnDefer = false

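	// Decode in a goroutine and stream the result through a pipe: the
	// erasure decode writes into pw while the returned reader consumes pr.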
	pr, pw := xioutil.WaitPipe()
	go func() {
		pw.CloseWithError(er.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, fi, metaArr, onlineDisks))
	}()

	// Cleanup function to cause the goroutine above to exit, in
	// case of an incomplete read.
	pipeCloser := func() {
		pr.CloseWithError(nil)
	}

	return fn(pr, h, pipeCloser, nsUnlocker)
}

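// getObjectWithFileInfo streams the requested byte range of the object to
// writer, erasure-decoding each part from the shards on the online disks.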
func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
	// Reorder online disks based on erasure distribution order.
	// Reorder parts metadata based on erasure distribution order.
	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)

	// For negative length read everything.
	if length < 0 {
		length = fi.Size - startOffset
	}

	// Reply back invalid range if the input offset and length fall out of range.
	if startOffset > fi.Size || startOffset+length > fi.Size {
		logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application)
		return InvalidRange{startOffset, length, fi.Size}
	}

	// Get start part index and offset.
	partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset)
	if err != nil {
		return InvalidRange{startOffset, length, fi.Size}
	}

	// Calculate endOffset according to length
	endOffset := startOffset
	if length > 0 {
		endOffset += length - 1
	}

	// Get last part index to read given length.
	lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset)
	if err != nil {
		return InvalidRange{startOffset, length, fi.Size}
	}

	var totalBytesRead int64
	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	// Ask for a heal at most once per object read (#9563).
	var healOnce sync.Once

	// Once we have obtained a common (latest) FileInfo, stick to a single
	// dataDir when reading the content. This avoids reading from another
	// dataDir that holds a stale FileInfo{}, ensuring we fail appropriately
	// during reads and use the same dataDir everywhere.
	dataDir := fi.DataDir
	for ; partIndex <= lastPartIndex; partIndex++ {
		if length == totalBytesRead {
			break
		}

		partNumber := fi.Parts[partIndex].Number

		// Save the current part name and size.
		partSize := fi.Parts[partIndex].Size

		partLength := partSize - partOffset
		// partLength should be adjusted so that we don't write more data than what was requested.
		if partLength > (length - totalBytesRead) {
			partLength = length - totalBytesRead
		}

		tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize)
		// Get the checksums of the current part.
		readers := make([]io.ReaderAt, len(onlineDisks))
		prefer := make([]bool, len(onlineDisks))
		for index, disk := range onlineDisks {
			if disk == OfflineDisk {
				continue
			}
			if !metaArr[index].IsValid() {
				continue
			}
			checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber)
			partPath := pathJoin(object, dataDir, fmt.Sprintf("part.%d", partNumber))
			readers[index] = newBitrotReader(disk, metaArr[index].Data, bucket, partPath, tillOffset,
				checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())

			// Prefer local disks (#9563): if this server holds a shard, read
			// it locally even when it is a parity shard, trading RPC call
			// overhead for local Reed-Solomon decoding.
			prefer[index] = disk.Hostname() == ""
		}

		written, err := erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer)
		// Note: we should not be defer'ing the following closeBitrotReaders() call as
		// we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time
		// we return from this function.
		closeBitrotReaders(readers)
		if err != nil {
			// If we have successfully written all the content that was asked
			// by the client, but we still see an error - this would mean
			// that we have some parts or data blocks missing or corrupted
			// - attempt a heal to successfully heal them for future calls.
			if written == partLength {
				var scan madmin.HealScanMode
				switch {
				case errors.Is(err, errFileNotFound):
					scan = madmin.HealNormalScan
				case errors.Is(err, errFileCorrupt):
					scan = madmin.HealDeepScan
				}
				switch scan {
				case madmin.HealNormalScan, madmin.HealDeepScan:
					healOnce.Do(func() {
						if _, healing := er.getOnlineDisksWithHealing(); !healing {
							go healObject(bucket, object, fi.VersionID, scan)
						}
					})
					// Healing is triggered and we have successfully written
					// the content to the client for this specific part, so
					// `nil` this error and proceed forward instead of
					// throwing errors.
					err = nil
				}
			}
			if err != nil {
				return toObjectErr(err, bucket, object)
			}
		}
		for i, r := range readers {
			if r == nil {
				onlineDisks[i] = OfflineDisk
			}
		}
		// Track total bytes read from disk and written to the client.
		totalBytesRead += partLength
		// partOffset will be valid only for the first part, hence reset it to 0 for
		// the remaining parts.
		partOffset = 0
	} // End of read all parts loop.
	// Return success.
	return nil
}

// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) {
	if !opts.NoLock {
		// Lock the object before reading.
		lk := er.NewNSLock(bucket, object)
		lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.RUnlock(lkctx.Cancel)
	}

	return er.getObjectInfo(ctx, bucket, object, opts)
}

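// deleteIfDangling removes an object version that is "dangling", i.e. its
// remaining metadata or data can no longer satisfy read quorum, so the
// version is unreadable and can be safely cleaned up.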
func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object string, metaArr []FileInfo, errs []error, dataErrs []error, opts ObjectOptions) (FileInfo, error) {
	var err error
	m, ok := isObjectDangling(metaArr, errs, dataErrs)
	if ok {
		err = errFileNotFound
		if opts.VersionID != "" {
			err = errFileVersionNotFound
		}
		defer NSUpdated(bucket, object)

		if opts.VersionID != "" {
			er.deleteObjectVersion(ctx, bucket, object, 1, FileInfo{
				VersionID: opts.VersionID,
			}, false)
		} else {
			er.deleteObjectVersion(ctx, bucket, object, 1, FileInfo{
				VersionID: m.VersionID,
			}, false)
		}
	}
	return m, err
}

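// getObjectFileInfo reads object metadata from all disks, reduces it to a
// quorum-consistent FileInfo, and returns the online disks that hold it.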
func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions, readData bool) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) {
	disks := er.getDisks()

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, readData)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
	if err != nil {
		if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
			_, derr := er.deleteIfDangling(ctx, bucket, object, metaArr, errs, nil, opts)
			if derr != nil {
				err = derr
			}
		}
		return fi, nil, nil, toObjectErr(err, bucket, object)
	}

	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		if errors.Is(reducedErr, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
			_, derr := er.deleteIfDangling(ctx, bucket, object, metaArr, errs, nil, opts)
			if derr != nil {
				err = derr
			}
		}
		return fi, nil, nil, toObjectErr(reducedErr, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return fi, nil, nil, err
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	// If one of the disks is offline, return right here; there is no need
	// to attempt a heal on the object.
	if countErrs(errs, errDiskNotFound) > 0 {
		return fi, metaArr, onlineDisks, nil
	}

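	// Count disks whose copy of the object is missing or stale, i.e. whose
	// metadata is invalid or does not match the quorum modTime.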
										 |  |  | 	var missingBlocks int | 
					
						
							|  |  |  | 	for i, err := range errs { | 
					
						
							|  |  |  | 		if err != nil && errors.Is(err, errFileNotFound) { | 
					
						
							|  |  |  | 			missingBlocks++ | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-11-22 02:41:30 +08:00
										 |  |  | 		if metaArr[i].IsValid() && metaArr[i].ModTime.Equal(fi.ModTime) { | 
					
						
							| 
									
										
										
										
											2021-01-28 02:21:14 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		missingBlocks++ | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// if missing metadata can be reconstructed, attempt to reconstruct.
 | 
					
						
							| 
									
										
										
										
											2021-07-26 23:01:41 +08:00
										 |  |  | 	// additionally do not heal delete markers inline, let them be
 | 
					
						
							|  |  |  | 	// healed upon regular heal process.
 | 
					
						
							|  |  |  | 	if !fi.Deleted && missingBlocks > 0 && missingBlocks < readQuorum { | 
					
						
							| 
									
										
										
										
											2021-02-18 02:18:12 +08:00
										 |  |  | 		if _, healing := er.getOnlineDisksWithHealing(); !healing { | 
					
						
							|  |  |  | 			go healObject(bucket, object, fi.VersionID, madmin.HealNormalScan) | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-01-28 02:21:14 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
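	// Worked example: with readQuorum = 8, a background heal is queued when
	// between 1 and 7 copies of the object's metadata are missing or stale;
	// anything at or above readQuorum is left to the regular heal process.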
	return fi, metaArr, onlineDisks, nil
}

// getObjectInfo - wrapper for reading object metadata and constructing ObjectInfo.
func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return objInfo, toObjectErr(err, bucket, object)
	}
	objInfo = fi.ToObjectInfo(bucket, object)
	if fi.Deleted {
		if opts.VersionID == "" || opts.DeleteMarker {
			return objInfo, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return objInfo, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	return objInfo, nil
}

// getObjectInfoAndQuorum - wrapper for reading object metadata and constructing ObjectInfo; additionally returns the write quorum for the object.
func (er erasureObjects) getObjectInfoAndQuorum(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, wquorum int, err error) {
	fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return objInfo, er.defaultWQuorum(), toObjectErr(err, bucket, object)
	}

	wquorum = fi.Erasure.DataBlocks
	if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
		wquorum++
	}

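	// Worked example: with 6 data and 6 parity blocks the write quorum is
	// 6+1 = 7, so two racing writers cannot both reach quorum; with 8 data
	// and 4 parity blocks it is simply 8.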
					
						
	objInfo = fi.ToObjectInfo(bucket, object)
	if !fi.VersionPurgeStatus().Empty() && opts.VersionID != "" {
		// Make sure to return object info to provide extra information.
		return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	if fi.Deleted {
		if opts.VersionID == "" || opts.DeleteMarker {
			return objInfo, wquorum, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return objInfo, wquorum, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	return objInfo, wquorum, nil
}

// Similar to rename but renames data from srcEntry to dstEntry at dataDir.
func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry string, metadata []FileInfo, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) {
	defer NSUpdated(dstBucket, dstEntry)

	g := errgroup.WithNErrs(len(disks))

	fvID := mustGetUUID()
	for index := range disks {
		metadata[index].SetTierFreeVersionID(fvID)
	}
	// Rename file on all underlying storage disks.
	for index := range disks {
		index := index
		g.Go(func() error {
			if disks[index] == nil {
				return errDiskNotFound
			}
			// Pick the FileInfo for the disk at this index.
			fi := metadata[index]
			// Assign an erasure index if one has not been initialized yet.
			if fi.Erasure.Index == 0 {
				fi.Erasure.Index = index + 1
			}

			if fi.IsValid() {
				return disks[index].RenameData(ctx, srcBucket, srcEntry, fi, dstBucket, dstEntry)
			}
			return errFileCorrupt
		}, index)
	}

	// Wait for all renames to finish.
	errs := g.Wait()

	// We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum;
	// otherwise return failure. Clean up successful renames.
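	// e.g. on a 16-disk set with writeQuorum 9, up to 7 failed renames are
	// tolerated; evalDisks below returns nil entries for the disks whose
	// rename failed, so the caller only continues with the successful ones.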
	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	return evalDisks(disks, errs), err
}

func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	data := r.Reader

	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}

	storageDisks := er.getDisks()
	// Get parity and data drive count based on storage class metadata.
	parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
	if parityDrives <= 0 {
		parityDrives = er.defaultParityCount
	}
	dataDrives := len(storageDisks) - parityDrives

	// We now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks, plus one when dataBlocks equals parityBlocks.
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}

	// Validate the input data size; it can never be less than -1.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}

	// Initialize parts metadata.
	partsMetadata := make([]FileInfo, len(storageDisks))

	fi := newFileInfo(pathJoin(minioMetaBucket, key), dataDrives, parityDrives)
	fi.DataDir = mustGetUUID()

	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Order disks according to erasure distribution.
	var onlineDisks []StorageAPI
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)

	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
	}

	// Fetch a buffer for I/O; reuse one from the pool where possible,
	// otherwise allocate a fresh one.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
	case size >= fi.Erasure.BlockSize:
		buffer = er.bp.Get()
		defer er.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate a full blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}

	shardFileSize := erasure.ShardFileSize(data.Size())
	writers := make([]io.Writer, len(onlineDisks))
	inlineBuffers := make([]*bytes.Buffer, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		if disk.IsOnline() {
			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
		}
	}

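	// Note: metacache objects are small internal bookkeeping objects, so
	// every shard is kept in an in-memory buffer and embedded into xl.meta
	// via SetInlineData() below; no separate part files are written here.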
					
						
	n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaBucket, key)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return ObjectInfo{}, IncompleteBody{Bucket: minioMetaBucket, Object: key}
	}

	for i, w := range writers {
		if w == nil {
			// Make sure not to write to disks for which erasure.Encode() could not complete.
			onlineDisks[i] = nil
			continue
		}
		partsMetadata[i].Data = inlineBuffers[i].Bytes()
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}

	modTime := UTCNow()

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disk.
	for index := range partsMetadata {
		partsMetadata[index].Size = n
		partsMetadata[index].Fresh = true
		partsMetadata[index].ModTime = modTime
		partsMetadata[index].Metadata = opts.UserDefined
	}

	// Set an additional header when data is inlined.
	for index := range partsMetadata {
		partsMetadata[index].SetInlineData()
	}

	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same on all disks, so we can pick
			// the first meta from an online disk.
			fi = partsMetadata[i]
			break
		}
	}

	if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaBucket, key, partsMetadata, writeQuorum); err != nil {
		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
	}

	return fi.ToObjectInfo(minioMetaBucket, key), nil
}

// PutObject - creates an object upon reading from the input stream
// until EOF, erasure codes the data across all disks and additionally
// writes `xl.meta` which carries the necessary metadata for future
// object operations.
func (er erasureObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return er.putObject(ctx, bucket, object, data, opts)
}

// putObject wrapper for erasureObjects PutObject
func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	data := r.Reader

	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}

	storageDisks := er.getDisks()

	parityDrives := len(storageDisks) / 2
	if !opts.MaxParity {
		// Get parity and data drive count based on storage class metadata.
		parityDrives = globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
		if parityDrives <= 0 {
			parityDrives = er.defaultParityCount
		}

		// If we have offline disks, upgrade the parity count for this object.
		parityOrig := parityDrives

		atomicParityDrives := uatomic.NewInt64(0)
		// Start with the current parityDrives.
		atomicParityDrives.Store(int64(parityDrives))

		var wg sync.WaitGroup
		for _, disk := range storageDisks {
			if disk == nil {
				atomicParityDrives.Inc()
				continue
			}
			if !disk.IsOnline() {
				atomicParityDrives.Inc()
				continue
			}
			wg.Add(1)
			go func(disk StorageAPI) {
				defer wg.Done()
				di, err := disk.DiskInfo(ctx)
				if err != nil || di.ID == "" {
					atomicParityDrives.Inc()
				}
			}(disk)
		}
		wg.Wait()

		parityDrives = int(atomicParityDrives.Load())
		if parityDrives >= len(storageDisks)/2 {
			parityDrives = len(storageDisks) / 2
		}
		if parityOrig != parityDrives {
			opts.UserDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives)
		}
	}
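	// Illustrative example: an object written with EC:4 on 16 drives while
	// two drives are down is upgraded to parity 6, and the change is recorded
	// in the object's metadata under minIOErasureUpgraded as "4->6".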
					
						
	dataDrives := len(storageDisks) - parityDrives

	// We now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks, plus one when dataBlocks equals parityBlocks
	// so that two concurrent writers cannot both reach quorum.
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum++
	}

	// Validate the input data size; it can never be less than -1
	// (-1 denotes a stream of unknown length).
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}

	// Initialize parts metadata.
	partsMetadata := make([]FileInfo, len(storageDisks))

	fi := newFileInfo(pathJoin(bucket, object), dataDrives, parityDrives)
	fi.VersionID = opts.VersionID
	if opts.Versioned && fi.VersionID == "" {
		fi.VersionID = mustGetUUID()
	}

	fi.DataDir = mustGetUUID()
	uniqueID := mustGetUUID()
	tempObj := uniqueID

	// Initialize erasure metadata.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Order disks according to erasure distribution.
	var onlineDisks []StorageAPI
	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)

	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Fetch a buffer for I/O; reuse one from the pool where possible,
	// otherwise allocate a fresh one.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
	case size == -1:
		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
			buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
		} else {
			buffer = er.bp.Get()
			defer er.bp.Put(buffer)
		}
	case size >= fi.Erasure.BlockSize:
		buffer = er.bp.Get()
		defer er.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate a full blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

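	// Note: a size of -1 indicates a stream of unknown length (for example
	// a compressed upload); in that case the buffer is sized from ActualSize()
	// when it is known to be small, falling back to the shared pool otherwise.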
	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}

	partName := "part.1"
	tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)

	// Delete the temporary object in the event of failure.
	// If PutObject succeeded there would be no temporary
	// object to delete.
	var online int
	defer func() {
		if online != len(onlineDisks) {
			er.renameAll(context.Background(), minioMetaTmpBucket, tempObj)
		}
	}()

	shardFileSize := erasure.ShardFileSize(data.Size())
	writers := make([]io.Writer, len(onlineDisks))
	var inlineBuffers []*bytes.Buffer
	if shardFileSize >= 0 {
		if !opts.Versioned && shardFileSize < smallFileThreshold {
			inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
		} else if shardFileSize < smallFileThreshold/8 {
			inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
		}
	} else {
		// If compressed, use the actual size to decide.
		if sz := erasure.ShardFileSize(data.ActualSize()); sz > 0 {
			if !opts.Versioned && sz < smallFileThreshold {
				inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
			} else if sz < smallFileThreshold/8 {
				inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
			}
		}
	}
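	// In short: shards below smallFileThreshold (or an eighth of it for
	// versioned objects) are buffered in memory and stored inline in
	// xl.meta; anything larger is streamed through on-disk bitrot writers.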
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}

		if !disk.IsOnline() {
			continue
		}

		if len(inlineBuffers) > 0 {
			sz := shardFileSize
			if sz < 0 {
				sz = data.ActualSize()
			}
			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, sz))
			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
			continue
		}

		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
	}

	toEncode := io.Reader(data)
	if data.Size() > bigFileThreshold {
		// We use 2 buffers, so we always have a full buffer of input.
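		// While the encoder drains one buffer, the readahead reader fills
		// the other, so network reads and erasure coding overlap.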
					
						
		bufA := er.bp.Get()
		bufB := er.bp.Get()
		defer er.bp.Put(bufA)
		defer er.bp.Put(bufB)
		ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
		if err == nil {
			toEncode = ra
			defer ra.Close()
		}
		logger.LogIf(ctx, err)
	}
	n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
	}

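	// The namespace lock is taken only now, after the data has already been
	// written out, so the critical section covers just the metadata commit
	// and the rename below rather than the whole upload.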
	if !opts.NoLock {
		lk := er.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}

	for i, w := range writers {
		if w == nil {
			onlineDisks[i] = nil
			continue
		}
		if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
			partsMetadata[i].Data = inlineBuffers[i].Bytes()
		} else {
			partsMetadata[i].Data = nil
		}
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}
	if opts.UserDefined["etag"] == "" {
		opts.UserDefined["etag"] = r.MD5CurrentHexString()
	}

	// Guess content-type from the extension if possible.
	if opts.UserDefined["content-type"] == "" {
		opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disk.
	for index := range partsMetadata {
		partsMetadata[index].Metadata = opts.UserDefined
		partsMetadata[index].Size = n
		partsMetadata[index].ModTime = modTime
	}

	if len(inlineBuffers) > 0 {
		// Set an additional header when data is inlined.
		for index := range partsMetadata {
			partsMetadata[index].SetInlineData()
		}
	}

	// Rename the successfully written temporary object to its final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
		if errors.Is(err, errFileNotFound) {
			return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
		}
		logger.LogIf(ctx, err)
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
			// Object info is the same on all disks, so we can pick
			// the first meta from an online disk.
			fi = partsMetadata[i]
			break
		}
	}

	// For speedtest objects do not attempt to heal them.
	if !opts.Speedtest {
		// Whether a disk was offline initially or went offline
		// during this upload, send the object to the MRF list.
		for i := 0; i < len(onlineDisks); i++ {
			if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
				continue
			}

			er.addPartial(bucket, object, fi.VersionID, fi.Size)
			break
		}
	}

	fi.ReplicationState = opts.PutReplicationState()
	online = countOnlineDisks(onlineDisks)

	// We are adding a new version to this object under the namespace lock, so this is the latest version.
	fi.IsLatest = true

	return fi.ToObjectInfo(bucket, object), nil
}

func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo, forceDelMarker bool) error {
	disks := er.getDisks()
	g := errgroup.WithNErrs(len(disks))
	for index := range disks {
		index := index
		g.Go(func() error {
			if disks[index] == nil {
				return errDiskNotFound
			}
			return disks[index].DeleteVersion(ctx, bucket, object, fi, forceDelMarker)
		}, index)
	}
	// Return errors, if any, observed during deletion.
	return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum)
}

// DeleteObjects deletes objects/versions in bulk. The function still automatically splits the object
// list into smaller batches when some object names are found to be duplicated in the delete list;
// splitting into smaller batches avoids holding the write lock of a duplicated object name twice.
func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	errs := make([]error, len(objects))
	dobjects := make([]DeletedObject, len(objects))
	writeQuorums := make([]int, len(objects))

	storageDisks := er.getDisks()

	for i := range objects {
		// Assume an (N/2 + 1) quorum for all objects. This is a
		// theoretical assumption such that for deletes we do not
		// need to honor the storage class of objects that have a
		// reduced quorum; storage class only needs to be honored
		// for Read() requests, which we already do.
					
						
							| 
									
										
										
										
											2022-01-28 09:00:15 +08:00
										 |  |  | 		writeQuorums[i] = len(storageDisks)/2 + 1 | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
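
	// Worked example (illustrative set size, added note): with N = 16 disks
	// in the erasure set, writeQuorums[i] = 16/2 + 1 = 9, i.e. a delete is
	// treated as successful once 9 disks have applied it.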

	versionsMap := make(map[string]FileInfoVersions, len(objects))
	for i := range objects {
		// Construct the FileInfo data that needs to be preserved on the disk.
		vr := FileInfo{
			Name:             objects[i].ObjectName,
			VersionID:        objects[i].VersionID,
			ReplicationState: objects[i].ReplicationState(),
			// Save the index so the correct error can be set at this index.
			Idx: i,
		}
		vr.SetTierFreeVersionID(mustGetUUID())
		// An unset VersionID means the delete is not specific about any
		// version; check whether the bucket is versioned instead.
		if objects[i].VersionID == "" {
			if opts.Versioned || opts.VersionSuspended {
				// Bucket is versioned and no version was explicitly
				// mentioned for deletes, create a delete marker instead.
				vr.ModTime = UTCNow()
				vr.Deleted = true
				// Suspended versioning means we add a `null` version
				// delete marker; otherwise add a new version for this
				// delete marker.
				if opts.Versioned {
					vr.VersionID = mustGetUUID()
				}
			}
		}
		// De-dup the same object name to collect multiple versions for the same object.
		v, ok := versionsMap[objects[i].ObjectName]
		if ok {
			v.Versions = append(v.Versions, vr)
		} else {
			v = FileInfoVersions{
				Name:     vr.Name,
				Versions: []FileInfo{vr},
			}
		}
		if vr.Deleted {
			dobjects[i] = DeletedObject{
				DeleteMarker:          vr.Deleted,
				DeleteMarkerVersionID: vr.VersionID,
				DeleteMarkerMTime:     DeleteMarkerMTime{vr.ModTime},
				ObjectName:            vr.Name,
				ReplicationState:      vr.ReplicationState,
			}
		} else {
			dobjects[i] = DeletedObject{
				ObjectName:       vr.Name,
				VersionID:        vr.VersionID,
				ReplicationState: vr.ReplicationState,
			}
		}
		versionsMap[objects[i].ObjectName] = v
	}
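
	// For illustration (added note): a request deleting {"a" (v1), "a" (v2),
	// "b"} yields versionsMap = {"a": [v1, v2], "b": [b]}, so dedupVersions
	// below carries one entry per unique object name rather than one per
	// delete-list entry.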

	dedupVersions := make([]FileInfoVersions, 0, len(versionsMap))
	for _, version := range versionsMap {
		dedupVersions = append(dedupVersions, version)
	}

	// Initialize list of errors.
	delObjErrs := make([][]error, len(storageDisks))

	var wg sync.WaitGroup
	// Remove versions in bulk for each disk
	for index, disk := range storageDisks {
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			delObjErrs[index] = make([]error, len(objects))
			if disk == nil {
				for i := range objects {
					delObjErrs[index][i] = errDiskNotFound
				}
				return
			}
			errs := disk.DeleteVersions(ctx, bucket, dedupVersions)
			for i, err := range errs {
				if err == nil {
					continue
				}
				for _, v := range dedupVersions[i].Versions {
					if err == errFileNotFound || err == errFileVersionNotFound {
						if !dobjects[v.Idx].DeleteMarker {
							// Not a delete marker; "not found" is OK.
							continue
						}
					}
					delObjErrs[index][v.Idx] = err
				}
			}
		}(index, disk)
	}
	wg.Wait()

	// Reduce errors for each object
	for objIndex := range objects {
		diskErrs := make([]error, len(storageDisks))
		// Iterate over disks to fetch the error
		// of deleting the current object.
		for i := range delObjErrs {
			// delObjErrs[i] is not nil when disks[i] is also not nil
			if delObjErrs[i] != nil {
				diskErrs[i] = delObjErrs[i][objIndex]
			}
		}
		err := reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex])
		if objects[objIndex].VersionID != "" {
			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName, objects[objIndex].VersionID)
		} else {
			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName)
		}

		defer NSUpdated(bucket, objects[objIndex].ObjectName)
	}

	// Check failed deletes across multiple objects
	for i, dobj := range dobjects {
		// This object errored, no need to attempt a heal.
		if errs[i] != nil {
			continue
		}

		// Check if there is any offline disk and add it to the MRF list
		for _, disk := range storageDisks {
			if disk != nil && disk.IsOnline() {
				// Skip attempted heal on online disks.
				continue
			}

			// For any other direct versionId reference we should
			// ensure no dangling file is left over.
			er.addPartial(bucket, dobj.ObjectName, dobj.VersionID, -1)
			break
		}
	}

	return dobjects, errs
}
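
// Illustrative call shape for DeleteObjects (a sketch; the bucket name and
// the versioning lookup are assumptions, not part of this file):
//
//	dobjs, errs := er.DeleteObjects(ctx, "mybucket", toDelete, ObjectOptions{
//		Versioned: globalBucketVersioningSys.Enabled("mybucket"),
//	})
//	for i := range errs {
//		// errs[i] and dobjs[i] correspond 1:1 with toDelete[i].
//	}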
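
// deletePrefix forcibly removes everything stored under the given prefix on
// all disks (doc comment added). Note the contrast with the quorum-reduced
// paths above: here the first disk error encountered is returned as-is. The
// trailing `true` in the Delete call below requests recursive deletion (an
// assumption based on the call shape).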
func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string) error {
	disks := er.getDisks()
	g := errgroup.WithNErrs(len(disks))
	for index := range disks {
		index := index
		g.Go(func() error {
			if disks[index] == nil {
				return nil
			}
			return disks[index].Delete(ctx, bucket, prefix, true)
		}, index)
	}
	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}
	return nil
}

// DeleteObject - deletes an object. This call doesn't necessarily return an
// error, as the handler is not required to reply with an error response to
// the client request.
func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if opts.DeletePrefix {
		return ObjectInfo{}, toObjectErr(er.deletePrefix(ctx, bucket, object), bucket, object)
	}

	var lc *lifecycle.Lifecycle
	if opts.Expiration.Expire {
		// Check if the current bucket has a configured lifecycle policy
		lc, _ = globalLifecycleSys.Get(bucket)
	}

	// Expiration attempted on a bucket with no lifecycle
	// rules shall be rejected.
	if lc == nil && opts.Expiration.Expire {
		if opts.VersionID != "" {
			return objInfo, VersionNotFound{
				Bucket:    bucket,
				Object:    object,
				VersionID: opts.VersionID,
			}
		}
		return objInfo, ObjectNotFound{
			Bucket: bucket,
			Object: object,
		}
	}

	// Acquire a write lock before deleting the object.
	lk := er.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	versionFound := true
	objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response.
	goi, writeQuorum, gerr := er.getObjectInfoAndQuorum(ctx, bucket, object, opts)
	if gerr != nil && goi.Name == "" {
		switch gerr.(type) {
		case InsufficientReadQuorum:
			return objInfo, InsufficientWriteQuorum{}
		}
		// For delete marker replication, the versionID being replicated will not exist on disk.
		if opts.DeleteMarker {
			versionFound = false
		} else {
			return objInfo, gerr
		}
	}

	if opts.Expiration.Expire {
		action := evalActionFromLifecycle(ctx, *lc, goi, false)
		var isErr bool
		switch action {
		case lifecycle.NoneAction:
			isErr = true
		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
			isErr = true
		}
		if isErr {
			if goi.VersionID != "" {
				return goi, VersionNotFound{
					Bucket:    bucket,
					Object:    object,
					VersionID: goi.VersionID,
				}
			}
			return goi, ObjectNotFound{
				Bucket: bucket,
				Object: object,
			}
		}
	}

	defer NSUpdated(bucket, object)

	storageDisks := er.getDisks()

	var markDelete bool
	// Determine whether to mark object deleted for replication
	if goi.VersionID != "" {
		markDelete = true
	}

	// Default deleteMarker to true if object is under versioning
	deleteMarker := opts.Versioned

	if opts.VersionID != "" {
		// Case where a replica version needs to be deleted on the target cluster.
		if versionFound && opts.DeleteMarkerReplicationStatus() == replication.Replica {
			markDelete = false
		}
		if opts.VersionPurgeStatus().Empty() && opts.DeleteMarkerReplicationStatus().Empty() {
			markDelete = false
		}
		if opts.VersionPurgeStatus() == Complete {
			markDelete = false
		}
		// Determine if the version represents an object delete
		// (deleteMarker = true).
		if versionFound && !goi.DeleteMarker { // implies a versioned delete of object
			deleteMarker = false
		}
	}
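
	// Summary (added note): at this point markDelete says whether a delete
	// marker should be written at all, while deleteMarker says whether the
	// resulting version is itself recorded as a delete marker
	// (FileInfo.Deleted) rather than a hard version delete.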

	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}
	fvID := mustGetUUID()
	if markDelete {
		if opts.Versioned || opts.VersionSuspended {
			if !deleteMarker {
				// Suspended versioning means we add a `null` version as the
				// delete marker, if it's not decided already.
				deleteMarker = opts.VersionSuspended && opts.VersionID == ""
			}
			fi := FileInfo{
				Name:             object,
				Deleted:          deleteMarker,
				MarkDeleted:      markDelete,
				ModTime:          modTime,
				ReplicationState: opts.DeleteReplication,
				TransitionStatus: opts.Transition.Status,
				ExpireRestored:   opts.Transition.ExpireRestored,
			}
			fi.SetTierFreeVersionID(fvID)
			if opts.Versioned {
				fi.VersionID = mustGetUUID()
				if opts.VersionID != "" {
					fi.VersionID = opts.VersionID
				}
			}
			// Suspended versioning means we add a `null` version as the
			// delete marker. Add the delete marker since we don't have any
			// version specified explicitly, or because a particular
			// version id needs to be replicated.
			if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, opts.DeleteMarker); err != nil {
				return objInfo, toObjectErr(err, bucket, object)
			}
			return fi.ToObjectInfo(bucket, object), nil
		}
	}

	// Delete the object version on all disks.
	dfi := FileInfo{
		Name:             object,
		VersionID:        opts.VersionID,
		MarkDeleted:      markDelete,
		Deleted:          deleteMarker,
		ModTime:          modTime,
		ReplicationState: opts.DeleteReplication,
		TransitionStatus: opts.Transition.Status,
		ExpireRestored:   opts.Transition.ExpireRestored,
	}
	dfi.SetTierFreeVersionID(fvID)
	if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, dfi, opts.DeleteMarker); err != nil {
		return objInfo, toObjectErr(err, bucket, object)
	}

	for _, disk := range storageDisks {
		if disk != nil && disk.IsOnline() {
			continue
		}
		er.addPartial(bucket, object, opts.VersionID, -1)
		break
	}

	return ObjectInfo{
		Bucket:                     bucket,
		Name:                       object,
		VersionID:                  opts.VersionID,
		VersionPurgeStatusInternal: opts.DeleteReplication.VersionPurgeStatusInternal,
		ReplicationStatusInternal:  opts.DeleteReplication.ReplicationStatusInternal,
	}, nil
}

// Send the successful-but-partial upload/delete to the MRF queue; however,
// ignore it if the channel is blocked by other items.
func (er erasureObjects) addPartial(bucket, object, versionID string, size int64) {
	globalMRFState.addPartialOp(partialOperation{
		bucket:    bucket,
		object:    object,
		versionID: versionID,
		size:      size,
		setIndex:  er.setIndex,
		poolIndex: er.poolIndex,
	})
}

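// PutObjectMetadata - updates the user-defined metadata of an existing
// object or object version (doc comment added; note that, unlike
// PutObjectTags below, locking can be skipped via opts.NoLock).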
func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	if !opts.NoLock {
		// Lock the object before updating metadata.
		lk := er.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}

	disks := er.getDisks()

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	if fi.Deleted {
		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	// If version-id is not specified, retention is supposed to be set on the latest object.
	if opts.VersionID == "" {
		opts.VersionID = fi.VersionID
	}

	objInfo := fi.ToObjectInfo(bucket, object)
	if opts.EvalMetadataFn != nil {
		if err := opts.EvalMetadataFn(objInfo); err != nil {
			return ObjectInfo{}, err
		}
	}
	for k, v := range objInfo.UserDefined {
		fi.Metadata[k] = v
	}
	fi.ModTime = opts.MTime
	fi.VersionID = opts.VersionID

	if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fi.ToObjectInfo(bucket, object), nil
}
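
// Illustrative use of EvalMetadataFn (a sketch, not from this file): callers
// may validate or mutate the user-defined metadata before it is persisted.
// Because objInfo.UserDefined is a map, mutations made inside the callback
// are visible to the merge loop above, e.g.
//
//	opts.EvalMetadataFn = func(oi ObjectInfo) error {
//		oi.UserDefined["X-Amz-Meta-Reviewed"] = "true" // assumed metadata key
//		return nil
//	}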

// PutObjectTags - replace or add tags to an existing object
func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
	// Lock the object before updating tags.
	lk := er.NewNSLock(bucket, object)
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	disks := er.getDisks()

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false)

	readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}
	if fi.Deleted {
		if opts.VersionID == "" {
			return ObjectInfo{}, toObjectErr(errFileNotFound, bucket, object)
		}
		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	filterOnlineDisksInplace(fi, metaArr, onlineDisks)

	fi.Metadata[xhttp.AmzObjectTagging] = tags
	fi.ReplicationState = opts.PutReplicationState()
	for k, v := range opts.UserDefined {
		fi.Metadata[k] = v
	}

	if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	return fi.ToObjectInfo(bucket, object), nil
}

// updateObjectMeta will update the metadata of a file.
func (er erasureObjects) updateObjectMeta(ctx context.Context, bucket, object string, fi FileInfo, onlineDisks []StorageAPI) error {
	if len(fi.Metadata) == 0 {
		return nil
	}

	g := errgroup.WithNErrs(len(onlineDisks))

	// Start writing `xl.meta` to all disks in parallel.
	for index := range onlineDisks {
		index := index
		g.Go(func() error {
			if onlineDisks[index] == nil {
				return errDiskNotFound
			}
			return onlineDisks[index].UpdateMetadata(ctx, bucket, object, fi)
		}, index)
	}

	// Wait for all the routines.
	mErrs := g.Wait()

	return reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, er.defaultWQuorum())
}

// DeleteObjectTags - delete object tags from an existing object
func (er erasureObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	return er.PutObjectTags(ctx, bucket, object, "", opts)
}

// GetObjectTags - get object tags from an existing object
func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
	// GetObjectInfo will return tag value as well
	oi, err := er.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return nil, err
	}

	return tags.ParseObjectTags(oi.UserTags)
}
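
// Tagging round-trip sketch (illustrative only; "k=v" is a made-up tag set):
//
//	if _, err := er.PutObjectTags(ctx, bucket, object, "k=v", opts); err == nil {
//		t, _ := er.GetObjectTags(ctx, bucket, object, opts)
//		_ = t.String() // yields "k=v"
//	}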
 | 
					
						
							|  |  |  | // TransitionObject - transition object content to target tier.
 | 
					
						
							|  |  |  | func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error { | 
					
						
							|  |  |  | 	tgtClient, err := globalTierConfigMgr.getDriver(opts.Transition.Tier) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-05-17 23:25:48 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	// Acquire write lock before starting to transition the object.
 | 
					
						
							|  |  |  | 	lk := er.NewNSLock(bucket, object) | 
					
						
							| 
									
										
										
										
											2021-04-30 11:55:21 +08:00
										 |  |  | 	lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-04-30 11:55:21 +08:00
										 |  |  | 	ctx = lkctx.Context() | 
					
						
							|  |  |  | 	defer lk.Unlock(lkctx.Cancel) | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if fi.Deleted { | 
					
						
							|  |  |  | 		if opts.VersionID == "" { | 
					
						
							|  |  |  | 			return toObjectErr(errFileNotFound, bucket, object) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// Make sure to return object info to provide extra information.
 | 
					
						
							|  |  |  | 		return toObjectErr(errMethodNotAllowed, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// verify that the object queued for transition is identical to that on disk.
 | 
					
						
							|  |  |  | 	if !opts.MTime.Equal(fi.ModTime) || !strings.EqualFold(opts.Transition.ETag, extractETag(fi.Metadata)) { | 
					
						
							|  |  |  | 		return toObjectErr(errFileNotFound, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// if object already transitioned, return
 | 
					
						
							|  |  |  | 	if fi.TransitionStatus == lifecycle.TransitionComplete { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-08-25 03:24:00 +08:00
										 |  |  | 	defer NSUpdated(bucket, object) | 
					
						
							|  |  |  | 
 | 
					
						
	if fi.XLV1 {
		if _, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{NoLock: true}); err != nil {
			return err
		}
		// Fetch FileInfo again. HealObject migrates the object to the
		// latest format. Among other things this changes fi.DataDir and
		// possibly fi.Data (if data is inlined).
		fi, metaArr, onlineDisks, err = er.getObjectFileInfo(ctx, bucket, object, opts, true)
		if err != nil {
			return toObjectErr(err, bucket, object)
		}
	}

	destObj, err := genTransitionObjName(bucket)
	if err != nil {
		return err
	}

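	// Stream the object to the remote tier without staging it locally: a
	// goroutine reconstructs the object from its erasure-coded shards and
	// writes it into the pipe while tgtClient.Put consumes the read end.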
	pr, pw := xioutil.WaitPipe()
	go func() {
		err := er.getObjectWithFileInfo(ctx, bucket, object, 0, fi.Size, pw, fi, metaArr, onlineDisks)
		pw.CloseWithError(err)
	}()

	var rv remoteVersionID
	rv, err = tgtClient.Put(ctx, destObj, pr, fi.Size)
	pr.CloseWithError(err)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to transition %s/%s(%s) to %s tier: %w", bucket, object, opts.VersionID, opts.Transition.Tier, err))
		return err
	}
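	// Record where the transitioned copy now lives: the tier name, the
	// remote object name and the remote tier's version ID for it.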
	fi.TransitionStatus = lifecycle.TransitionComplete
	fi.TransitionedObjName = destObj
	fi.TransitionTier = opts.Transition.Tier
	fi.TransitionVersionID = string(rv)
	eventName := event.ObjectTransitionComplete

	storageDisks := er.getDisks()
	// we now know the number of blocks this object needs for data and parity.
	// writeQuorum is dataBlocks, incremented by one to break the tie when
	// dataBlocks equals parityBlocks.
	writeQuorum := fi.Erasure.DataBlocks
	if fi.Erasure.DataBlocks == fi.Erasure.ParityBlocks {
		writeQuorum++
	}

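	// Delete the local copy of the object's data. fi carries
	// TransitionStatus == lifecycle.TransitionComplete, so the version's
	// metadata is retained and reads are served from the remote tier.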
	if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi, false); err != nil {
		eventName = event.ObjectTransitionFailed
	}

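	// If any disk was offline during this operation, mark the object as
	// partially complete so background healing brings that disk back in
	// sync once it is online.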
	for _, disk := range storageDisks {
		if disk != nil && disk.IsOnline() {
			continue
		}
		er.addPartial(bucket, object, opts.VersionID, -1)
		break
	}

	objInfo := fi.ToObjectInfo(bucket, object)
	sendEvent(eventArgs{
		EventName:  eventName,
		BucketName: bucket,
		Object:     objInfo,
		Host:       "Internal: [ILM-Transition]",
	})
	auditLogLifecycle(ctx, objInfo, ILMTransition)
	return err
}

// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
// This is similar to PostObjectRestore from the AWS GLACIER
// storage class. When the PostObjectRestore API is called, a temporary copy of the object
// is restored locally to the bucket on the source cluster until the restore expiry date.
// The copy that was transitioned continues to reside in the transitioned tier.
func (er erasureObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	return er.restoreTransitionedObject(ctx, bucket, object, opts)
}

// update restore status header in the metadata
func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, object string, objInfo ObjectInfo, opts ObjectOptions, rerr error) error {
	oi := objInfo.Clone()
	oi.metadataOnly = true // Perform only metadata updates.

	if rerr == nil {
		oi.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
	} else { // allow retry in the case of failure to restore
		delete(oi.UserDefined, xhttp.AmzRestore)
	}
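	// A metadata-only CopyObject of the version onto itself rewrites just
	// the object's metadata, leaving its data untouched.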
	if _, err := er.CopyObject(ctx, bucket, object, bucket, object, oi, ObjectOptions{
		VersionID: oi.VersionID,
	}, ObjectOptions{
		VersionID: oi.VersionID,
	}); err != nil {
		logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
		return err
	}
	return nil
}

// restoreTransitionedObject restores the transitioned object content locally. For a multipart
// object it chunks the stream from the remote tier into the same number of parts as recorded
// in this version's xl.meta and rehydrates each part.N into this version's fi.DataDir.
func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket string, object string, opts ObjectOptions) error {
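	// setRestoreHeaderFn records the restore status in the object's
	// metadata (best-effort; its own error is ignored) and passes the
	// original restore error through to the caller.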
	setRestoreHeaderFn := func(oi ObjectInfo, rerr error) error {
		er.updateRestoreMetadata(ctx, bucket, object, oi, opts, rerr)
		return rerr
	}
	var oi ObjectInfo
	// get the file info on disk for transitioned object
	actualfi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts, false)
	if err != nil {
		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
	}

	oi = actualfi.ToObjectInfo(bucket, object)
	ropts := putRestoreOpts(bucket, object, opts.Transition.RestoreRequest, oi)
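	// A single-part object is rehydrated with a plain PutObject of the
	// stream fetched from the remote tier.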
	if len(oi.Parts) == 1 {
		var rs *HTTPRangeSpec
		gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
		if err != nil {
			return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
		}
		defer gr.Close()
		hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size)
		if err != nil {
			return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
		}
		pReader := NewPutObjReader(hashReader)
		ropts.UserDefined[xhttp.AmzRestore] = completedRestoreObj(opts.Transition.RestoreExpiry).String()
		_, err = er.PutObject(ctx, bucket, object, pReader, ropts)
		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
	}
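
	// A multipart object is rehydrated part by part through the multipart
	// API so that the restored version keeps its original part layout.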
	uploadID, err := er.NewMultipartUpload(ctx, bucket, object, ropts)
	if err != nil {
		return setRestoreHeaderFn(oi, err)
	}

	var uploadedParts []CompletePart
	var rs *HTTPRangeSpec
	// get reader from the warm backend - note that even in the case of encrypted objects, this stream is still encrypted.
	gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, http.Header{}, oi, opts)
	if err != nil {
		return setRestoreHeaderFn(oi, err)
	}
	defer gr.Close()

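	// The remote tier returns one contiguous stream; each iteration below
	// consumes exactly partInfo.Size bytes of it for the corresponding part.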
	// rehydrate the parts back on disk as per the original xl.meta prior to transition
	for _, partInfo := range oi.Parts {
		hr, err := hash.NewReader(gr, partInfo.Size, "", "", partInfo.Size)
		if err != nil {
			return setRestoreHeaderFn(oi, err)
		}
		pInfo, err := er.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
		if err != nil {
			return setRestoreHeaderFn(oi, err)
		}
		if pInfo.Size != partInfo.Size {
			return setRestoreHeaderFn(oi, InvalidObjectState{Bucket: bucket, Object: object})
		}
		uploadedParts = append(uploadedParts, CompletePart{
			PartNumber: pInfo.PartNumber,
			ETag:       pInfo.ETag,
		})
	}
	_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
		MTime: oi.ModTime,
	})
	return setRestoreHeaderFn(oi, err)
}