| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  |  * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc. | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Licensed under the Apache License, Version 2.0 (the "License"); | 
					
						
							|  |  |  |  * you may not use this file except in compliance with the License. | 
					
						
							|  |  |  |  * You may obtain a copy of the License at | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Unless required by applicable law or agreed to in writing, software | 
					
						
							|  |  |  |  * distributed under the License is distributed on an "AS IS" BASIS, | 
					
						
							|  |  |  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
					
						
							|  |  |  |  * See the License for the specific language governing permissions and | 
					
						
							|  |  |  |  * limitations under the License. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-19 07:23:42 +08:00
										 |  |  | package cmd | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							| 
									
										
										
										
											2018-09-21 10:22:09 +08:00
										 |  |  | 	"bytes" | 
					
						
							| 
									
										
										
										
											2018-03-15 03:01:47 +08:00
										 |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2020-03-03 08:29:30 +08:00
										 |  |  | 	"fmt" | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	"io" | 
					
						
							| 
									
										
										
										
											2018-09-21 10:22:09 +08:00
										 |  |  | 	"net/http" | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	"path" | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 	"sync" | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-07-15 00:38:05 +08:00
										 |  |  | 	"github.com/minio/minio-go/v7/pkg/tags" | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	xhttp "github.com/minio/minio/cmd/http" | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/logger" | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/mimedb" | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/sync/errgroup" | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-11-21 08:57:12 +08:00
// objectOpIgnoredErrs lists all errors which can be ignored in object
// operations — i.e. treated as a non-fatal, per-disk condition when
// reducing quorum errors (see reduceWriteQuorumErrs callers below).
var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)
					
						
							| 
									
										
										
										
											2016-11-21 08:57:12 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | // putObjectDir hints the bottom layer to create a new directory.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) putObjectDir(ctx context.Context, bucket, object string, writeQuorum int) error { | 
					
						
							|  |  |  | 	storageDisks := er.getDisks() | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	g := errgroup.WithNErrs(len(storageDisks)) | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Prepare object creation in all disks
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	for index := range storageDisks { | 
					
						
							|  |  |  | 		if storageDisks[index] == nil { | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			err := storageDisks[index].MakeVol(pathJoin(bucket, object)) | 
					
						
							|  |  |  | 			if err != nil && err != errVolumeExists { | 
					
						
							|  |  |  | 				return err | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | /// Object Operations
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
// CopyObject - copy object source object to destination object.
// if source object and destination object are same we only
// update metadata.
func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
	// This call shouldn't be used for anything other than metadata updates or adding self referential versions.
	if !srcInfo.metadataOnly {
		return oi, NotImplemented{}
	}
	defer ObjectPathUpdated(path.Join(dstBucket, dstObject))

	// Read metadata associated with the object from all disks.
	storageDisks := er.getDisks()
	metaArr, errs := readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID)

	// get Quorum for this object
	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, er, metaArr, errs)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	// List all online disks.
	// modTime here is the latest modification time seen across disks;
	// it is reused below as the version's modTime unless overwritten.
	onlineDisks, modTime := listOnlineDisks(storageDisks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	// A delete-marker source: without an explicit versionID the object
	// effectively does not exist; with one, the operation is disallowed.
	if fi.Deleted {
		if srcOpts.VersionID == "" {
			return oi, toObjectErr(errFileNotFound, srcBucket, srcObject)
		}
		return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
	}

	versionID := srcInfo.VersionID
	if srcInfo.versionOnly {
		versionID = dstOpts.VersionID
		// preserve destination versionId if specified.
		if versionID == "" {
			versionID = mustGetUUID()
		}
		// A newly-minted version gets a fresh modification time.
		modTime = UTCNow()
	}

	fi.VersionID = versionID // set any new versionID we might have created
	fi.ModTime = modTime     // set modTime for the new versionID

	// Update `xl.meta` content on each disks.
	// NOTE(review): every entry aliases the same srcInfo.UserDefined map,
	// so the "etag" assignment below mutates one shared map — confirm
	// this aliasing is intended by all callers.
	for index := range metaArr {
		metaArr[index].ModTime = modTime
		metaArr[index].VersionID = versionID
		metaArr[index].Metadata = srcInfo.UserDefined
		metaArr[index].Metadata["etag"] = srcInfo.ETag
	}

	tempObj := mustGetUUID()

	// Cleanup in case of xl.meta writing failure
	defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum)

	// Write unique `xl.meta` for each disk.
	if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	// Rename atomically `xl.meta` from tmp location to destination for each disk.
	if _, err = renameFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil {
		return oi, toObjectErr(err, srcBucket, srcObject)
	}

	return fi.ToObjectInfo(srcBucket, srcObject), nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-09-21 10:22:09 +08:00
// GetObjectNInfo - returns object info and an object
// Read(Closer). When err != nil, the returned reader is always nil.
func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return nil, err
	}

	// Handle a directory request by returning a reader that
	// returns no bytes.
	if HasSuffix(object, SlashSeparator) {
		var objInfo ObjectInfo
		if objInfo, err = er.getObjectInfoDir(ctx, bucket, object); err != nil {
			return nil, toObjectErr(err, bucket, object)
		}
		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts)
	}

	// Resolve metadata, parts info, and the set of disks holding them.
	fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts)
	if err != nil {
		return nil, toObjectErr(err, bucket, object)
	}

	objInfo := fi.ToObjectInfo(bucket, object)
	if objInfo.DeleteMarker {
		// Unversioned reads of a delete marker behave as "not found";
		// versioned reads are rejected but still carry the object info.
		if opts.VersionID == "" {
			return &GetObjectReader{
				ObjInfo: objInfo,
			}, toObjectErr(errFileNotFound, bucket, object)
		}
		// Make sure to return object info to provide extra information.
		return &GetObjectReader{
			ObjInfo: objInfo,
		}, toObjectErr(errMethodNotAllowed, bucket, object)
	}

	// fn wraps the raw stream; off/length are the absolute byte range
	// derived from the (possibly nil) HTTP range spec.
	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts)
	if nErr != nil {
		return nil, nErr
	}

	// Stream the object through a pipe: the goroutine writes erasure-decoded
	// data into pw; any error terminates the reader via CloseWithError.
	pr, pw := io.Pipe()
	go func() {
		err := er.getObjectWithFileInfo(ctx, bucket, object, off, length, pw, "", opts, fi, metaArr, onlineDisks)
		pw.CloseWithError(err)
	}()

	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() { pr.Close() }

	return fn(pr, h, opts.CheckPrecondFn, pipeCloser)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // GetObject - reads an object erasured coded across multiple
 | 
					
						
							|  |  |  | // disks. Supports additional parameters like offset and length
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
										 |  |  | // which are synonymous with HTTP Range requests.
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | //
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
										 |  |  | // startOffset indicates the starting read location of the object.
 | 
					
						
							|  |  |  | // length indicates the total length of the object.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if err := checkGetObjArgs(ctx, bucket, object); err != nil { | 
					
						
							| 
									
										
										
										
											2016-12-02 15:15:17 +08:00
										 |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-12-22 03:29:32 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Start offset cannot be negative.
 | 
					
						
							|  |  |  | 	if startOffset < 0 { | 
					
						
							| 
									
										
										
										
											2019-10-12 09:50:54 +08:00
										 |  |  | 		logger.LogIf(ctx, errUnexpected, logger.Application) | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		return errUnexpected | 
					
						
							| 
									
										
										
										
											2016-07-07 16:30:34 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-12-22 03:29:32 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-08 22:46:49 +08:00
										 |  |  | 	// Writer cannot be nil.
 | 
					
						
							|  |  |  | 	if writer == nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, errUnexpected) | 
					
						
							|  |  |  | 		return errUnexpected | 
					
						
							| 
									
										
										
										
											2016-07-08 22:46:49 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-09-01 02:39:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 	// If its a directory request, we return an empty body.
 | 
					
						
							| 
									
										
										
										
											2019-12-06 15:16:06 +08:00
										 |  |  | 	if HasSuffix(object, SlashSeparator) { | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 		_, err := writer.Write([]byte("")) | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	return er.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts) | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2016-07-25 13:49:27 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error { | 
					
						
							| 
									
										
										
										
											2016-07-25 13:49:27 +08:00
										 |  |  | 	// Reorder online disks based on erasure distribution order.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution) | 
					
						
							| 
									
										
										
										
											2016-07-25 13:49:27 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Reorder parts metadata based on erasure distribution order.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	metaArr = shufflePartsMetadata(metaArr, fi.Erasure.Distribution) | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-22 03:29:32 +08:00
										 |  |  | 	// For negative length read everything.
 | 
					
						
							|  |  |  | 	if length < 0 { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		length = fi.Size - startOffset | 
					
						
							| 
									
										
										
										
											2016-07-08 22:46:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-22 03:29:32 +08:00
										 |  |  | 	// Reply back invalid range if the input offset and length fall out of range.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	if startOffset > fi.Size || startOffset+length > fi.Size { | 
					
						
							|  |  |  | 		logger.LogIf(ctx, InvalidRange{startOffset, length, fi.Size}, logger.Application) | 
					
						
							|  |  |  | 		return InvalidRange{startOffset, length, fi.Size} | 
					
						
							| 
									
										
										
										
											2016-07-08 22:46:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-20 04:35:26 +08:00
										 |  |  | 	// Get start part index and offset.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	partIndex, partOffset, err := fi.ObjectToPartOffset(ctx, startOffset) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return InvalidRange{startOffset, length, fi.Size} | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-28 02:51:02 +08:00
										 |  |  | 	// Calculate endOffset according to length
 | 
					
						
							|  |  |  | 	endOffset := startOffset | 
					
						
							|  |  |  | 	if length > 0 { | 
					
						
							|  |  |  | 		endOffset += length - 1 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-20 04:35:26 +08:00
										 |  |  | 	// Get last part index to read given length.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	lastPartIndex, _, err := fi.ObjectToPartOffset(ctx, endOffset) | 
					
						
							| 
									
										
										
										
											2016-06-20 04:35:26 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return InvalidRange{startOffset, length, fi.Size} | 
					
						
							| 
									
										
										
										
											2016-06-20 04:35:26 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | 	var totalBytesRead int64 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) | 
					
						
							| 
									
										
										
										
											2017-08-15 09:08:42 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-08-07 06:14:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
											  
											
												Prefer local disks when fetching data blocks (#9563)
If the requested server is part of the set this will always read 
from the local disk, even if the disk contains a parity shard. 
In default setup there is a 50% chance that at least 
one shard that otherwise would have been fetched remotely 
will be read locally instead.
It basically trades RPC call overhead for reed-solomon. 
On distributed localhost this seems to be fairly break-even, 
with a very small gain in throughput and latency. 
However on networked servers this should be a bigger
1MB objects, before:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 76257:
 * Avg: 25ms 50%: 24ms 90%: 32ms 99%: 42ms Fastest: 7ms Slowest: 67ms
 * First Byte: Average: 23ms, Median: 22ms, Best: 5ms, Worst: 65ms
Throughput:
* Average: 1213.68 MiB/s, 1272.63 obj/s (59.948s, starting 14:45:44 CEST)
```
After:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 78845:
 * Avg: 24ms 50%: 24ms 90%: 31ms 99%: 39ms Fastest: 8ms Slowest: 62ms
 * First Byte: Average: 22ms, Median: 21ms, Best: 6ms, Worst: 57ms
Throughput:
* Average: 1255.11 MiB/s, 1316.08 obj/s (59.938s, starting 14:43:58 CEST)
```
Bonus fix: Only ask for heal once on an object.
											
										 
											2020-05-27 07:47:23 +08:00
										 |  |  | 	var healOnce sync.Once | 
					
						
							| 
									
										
										
										
											2016-06-20 04:35:26 +08:00
										 |  |  | 	for ; partIndex <= lastPartIndex; partIndex++ { | 
					
						
							| 
									
										
										
										
											2016-06-22 05:34:11 +08:00
										 |  |  | 		if length == totalBytesRead { | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-03-03 08:29:30 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		partNumber := fi.Parts[partIndex].Number | 
					
						
							| 
									
										
										
										
											2020-03-03 08:29:30 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 		// Save the current part name and size.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		partSize := fi.Parts[partIndex].Size | 
					
						
							| 
									
										
										
										
											2016-06-23 00:05:03 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-08-07 06:14:08 +08:00
										 |  |  | 		partLength := partSize - partOffset | 
					
						
							|  |  |  | 		// partLength should be adjusted so that we don't write more data than what was requested.
 | 
					
						
							|  |  |  | 		if partLength > (length - totalBytesRead) { | 
					
						
							|  |  |  | 			partLength = length - totalBytesRead | 
					
						
							| 
									
										
										
										
											2016-06-20 04:35:26 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		tillOffset := erasure.ShardFileOffset(partOffset, partLength, partSize) | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | 		// Get the checksums of the current part.
 | 
					
						
							| 
									
										
										
										
											2019-01-17 20:58:18 +08:00
										 |  |  | 		readers := make([]io.ReaderAt, len(onlineDisks)) | 
					
						
							| 
									
										
											  
											
												Prefer local disks when fetching data blocks (#9563)
If the requested server is part of the set this will always read 
from the local disk, even if the disk contains a parity shard. 
In default setup there is a 50% chance that at least 
one shard that otherwise would have been fetched remotely 
will be read locally instead.
It basically trades RPC call overhead for reed-solomon. 
On distributed localhost this seems to be fairly break-even, 
with a very small gain in throughput and latency. 
However on networked servers this should be a bigger
1MB objects, before:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 76257:
 * Avg: 25ms 50%: 24ms 90%: 32ms 99%: 42ms Fastest: 7ms Slowest: 67ms
 * First Byte: Average: 23ms, Median: 22ms, Best: 5ms, Worst: 65ms
Throughput:
* Average: 1213.68 MiB/s, 1272.63 obj/s (59.948s, starting 14:45:44 CEST)
```
After:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 78845:
 * Avg: 24ms 50%: 24ms 90%: 31ms 99%: 39ms Fastest: 8ms Slowest: 62ms
 * First Byte: Average: 22ms, Median: 21ms, Best: 6ms, Worst: 57ms
Throughput:
* Average: 1255.11 MiB/s, 1316.08 obj/s (59.938s, starting 14:43:58 CEST)
```
Bonus fix: Only ask for heal once on an object.
											
										 
											2020-05-27 07:47:23 +08:00
										 |  |  | 		prefer := make([]bool, len(onlineDisks)) | 
					
						
							| 
									
										
										
										
											2018-08-07 06:14:08 +08:00
										 |  |  | 		for index, disk := range onlineDisks { | 
					
						
							| 
									
										
										
										
											2017-08-15 09:08:42 +08:00
										 |  |  | 			if disk == OfflineDisk { | 
					
						
							| 
									
										
										
										
											2016-07-25 13:49:27 +08:00
										 |  |  | 				continue | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-03-03 08:29:30 +08:00
										 |  |  | 			checksumInfo := metaArr[index].Erasure.GetChecksumInfo(partNumber) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			partPath := pathJoin(object, metaArr[index].DataDir, fmt.Sprintf("part.%d", partNumber)) | 
					
						
							| 
									
										
										
										
											2020-03-03 08:29:30 +08:00
										 |  |  | 			readers[index] = newBitrotReader(disk, bucket, partPath, tillOffset, | 
					
						
							|  |  |  | 				checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize()) | 
					
						
							| 
									
										
											  
											
												Prefer local disks when fetching data blocks (#9563)
If the requested server is part of the set this will always read 
from the local disk, even if the disk contains a parity shard. 
In default setup there is a 50% chance that at least 
one shard that otherwise would have been fetched remotely 
will be read locally instead.
It basically trades RPC call overhead for reed-solomon. 
On distributed localhost this seems to be fairly break-even, 
with a very small gain in throughput and latency. 
However on networked servers this should be a bigger
1MB objects, before:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 76257:
 * Avg: 25ms 50%: 24ms 90%: 32ms 99%: 42ms Fastest: 7ms Slowest: 67ms
 * First Byte: Average: 23ms, Median: 22ms, Best: 5ms, Worst: 65ms
Throughput:
* Average: 1213.68 MiB/s, 1272.63 obj/s (59.948s, starting 14:45:44 CEST)
```
After:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 78845:
 * Avg: 24ms 50%: 24ms 90%: 31ms 99%: 39ms Fastest: 8ms Slowest: 62ms
 * First Byte: Average: 22ms, Median: 21ms, Best: 6ms, Worst: 57ms
Throughput:
* Average: 1255.11 MiB/s, 1316.08 obj/s (59.938s, starting 14:43:58 CEST)
```
Bonus fix: Only ask for heal once on an object.
											
										 
											2020-05-27 07:47:23 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 			// Prefer local disks
 | 
					
						
							|  |  |  | 			prefer[index] = disk.Hostname() == "" | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		err = erasure.Decode(ctx, writer, readers, partOffset, partLength, partSize, prefer) | 
					
						
							|  |  |  | 		// Note: we should not be defer'ing the following closeBitrotReaders() call as
 | 
					
						
							|  |  |  | 		// we are inside a for loop i.e if we use defer, we would accumulate a lot of open files by the time
 | 
					
						
							| 
									
										
										
										
											2019-01-17 20:58:18 +08:00
										 |  |  | 		// we return from this function.
 | 
					
						
							|  |  |  | 		closeBitrotReaders(readers) | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 		if err != nil { | 
					
						
							| 
									
										
										
										
											2020-04-02 03:14:00 +08:00
										 |  |  | 			if decodeHealErr, ok := err.(*errDecodeHealRequired); ok { | 
					
						
							| 
									
										
											  
											
												Prefer local disks when fetching data blocks (#9563)
If the requested server is part of the set this will always read 
from the local disk, even if the disk contains a parity shard. 
In default setup there is a 50% chance that at least 
one shard that otherwise would have been fetched remotely 
will be read locally instead.
It basically trades RPC call overhead for reed-solomon. 
On distributed localhost this seems to be fairly break-even, 
with a very small gain in throughput and latency. 
However on networked servers this should be a bigger
1MB objects, before:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 76257:
 * Avg: 25ms 50%: 24ms 90%: 32ms 99%: 42ms Fastest: 7ms Slowest: 67ms
 * First Byte: Average: 23ms, Median: 22ms, Best: 5ms, Worst: 65ms
Throughput:
* Average: 1213.68 MiB/s, 1272.63 obj/s (59.948s, starting 14:45:44 CEST)
```
After:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 78845:
 * Avg: 24ms 50%: 24ms 90%: 31ms 99%: 39ms Fastest: 8ms Slowest: 62ms
 * First Byte: Average: 22ms, Median: 21ms, Best: 6ms, Worst: 57ms
Throughput:
* Average: 1255.11 MiB/s, 1316.08 obj/s (59.938s, starting 14:43:58 CEST)
```
Bonus fix: Only ask for heal once on an object.
											
										 
											2020-05-27 07:47:23 +08:00
										 |  |  | 				healOnce.Do(func() { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 					go deepHealObject(bucket, object, fi.VersionID) | 
					
						
							| 
									
										
											  
											
												Prefer local disks when fetching data blocks (#9563)
If the requested server is part of the set this will always read 
from the local disk, even if the disk contains a parity shard. 
In default setup there is a 50% chance that at least 
one shard that otherwise would have been fetched remotely 
will be read locally instead.
It basically trades RPC call overhead for reed-solomon. 
On distributed localhost this seems to be fairly break-even, 
with a very small gain in throughput and latency. 
However on networked servers this should be a bigger
1MB objects, before:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 76257:
 * Avg: 25ms 50%: 24ms 90%: 32ms 99%: 42ms Fastest: 7ms Slowest: 67ms
 * First Byte: Average: 23ms, Median: 22ms, Best: 5ms, Worst: 65ms
Throughput:
* Average: 1213.68 MiB/s, 1272.63 obj/s (59.948s, starting 14:45:44 CEST)
```
After:
```
Operation: GET. Concurrency: 32. Hosts: 4.
Requests considered: 78845:
 * Avg: 24ms 50%: 24ms 90%: 31ms 99%: 39ms Fastest: 8ms Slowest: 62ms
 * First Byte: Average: 22ms, Median: 21ms, Best: 6ms, Worst: 57ms
Throughput:
* Average: 1255.11 MiB/s, 1316.08 obj/s (59.938s, starting 14:43:58 CEST)
```
Bonus fix: Only ask for heal once on an object.
											
										 
											2020-05-27 07:47:23 +08:00
										 |  |  | 				}) | 
					
						
							| 
									
										
										
										
											2020-04-02 03:14:00 +08:00
										 |  |  | 				err = decodeHealErr.err | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if err != nil { | 
					
						
							|  |  |  | 				return toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2019-01-17 20:58:18 +08:00
										 |  |  | 		for i, r := range readers { | 
					
						
							| 
									
										
										
										
											2018-08-07 06:14:08 +08:00
										 |  |  | 			if r == nil { | 
					
						
							|  |  |  | 				onlineDisks[i] = OfflineDisk | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2016-07-28 17:20:34 +08:00
										 |  |  | 		// Track total bytes read from disk and written to the client.
 | 
					
						
							| 
									
										
										
										
											2018-08-07 06:14:08 +08:00
										 |  |  | 		totalBytesRead += partLength | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-23 00:05:03 +08:00
										 |  |  | 		// partOffset will be valid only for the first part, hence reset it to 0 for
 | 
					
						
							|  |  |  | 		// the remaining parts.
 | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 		partOffset = 0 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | 	} // End of read all parts loop.
 | 
					
						
							| 
									
										
										
										
											2016-06-01 11:23:31 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Return success.
 | 
					
						
							| 
									
										
										
										
											2016-05-29 06:13:15 +08:00
										 |  |  | 	return nil | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | // getObject wrapper for erasure GetObject
 | 
					
						
							|  |  |  | func (er erasureObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error { | 
					
						
							|  |  |  | 	fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts) | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-07-03 07:17:27 +08:00
										 |  |  | 	if fi.Deleted { | 
					
						
							|  |  |  | 		if opts.VersionID == "" { | 
					
						
							|  |  |  | 			return toObjectErr(errFileNotFound, bucket, object) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// Make sure to return object info to provide extra information.
 | 
					
						
							|  |  |  | 		return toObjectErr(errMethodNotAllowed, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	return er.getObjectWithFileInfo(ctx, bucket, object, startOffset, length, writer, etag, opts, fi, metaArr, onlineDisks) | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | // getObjectInfoDir - This getObjectInfo is specific to object directory lookup.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) getObjectInfoDir(ctx context.Context, bucket, object string) (ObjectInfo, error) { | 
					
						
							|  |  |  | 	storageDisks := er.getDisks() | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	g := errgroup.WithNErrs(len(storageDisks)) | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Prepare object creation in a all disks
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	for index, disk := range storageDisks { | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 		if disk == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 			// Check if 'prefix' is an object on this 'disk'.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			entries, err := storageDisks[index].ListDir(bucket, object, 1) | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 			if err != nil { | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 				return err | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 			if len(entries) > 0 { | 
					
						
							|  |  |  | 				// Not a directory if not empty.
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 				return errFileNotFound | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-01 00:32:16 +08:00
										 |  |  | 	readQuorum := getReadQuorum(len(storageDisks)) | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	err := reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) | 
					
						
							|  |  |  | 	return dirObjectInfo(bucket, object, 0, map[string]string{}), err | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // GetObjectInfo - reads object metadata and replies back ObjectInfo.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) { | 
					
						
							|  |  |  | 	if err = checkGetObjArgs(ctx, bucket, object); err != nil { | 
					
						
							|  |  |  | 		return info, err | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-09-01 02:39:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-12-06 15:16:06 +08:00
										 |  |  | 	if HasSuffix(object, SlashSeparator) { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		info, err = er.getObjectInfoDir(ctx, bucket, object) | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 		if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			return info, toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2018-02-02 16:34:15 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 		return info, nil | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-07-03 07:17:27 +08:00
										 |  |  | 	return er.getObjectInfo(ctx, bucket, object, opts) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI, err error) { | 
					
						
							|  |  |  | 	disks := er.getDisks() | 
					
						
							| 
									
										
										
										
											2018-10-20 02:00:09 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Read metadata associated with the object from all disks.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID) | 
					
						
							| 
									
										
										
										
											2018-10-20 02:00:09 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	readQuorum, _, err := objectQuorumFromMeta(ctx, er, metaArr, errs) | 
					
						
							| 
									
										
										
										
											2019-02-06 09:58:48 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return fi, nil, nil, err | 
					
						
							| 
									
										
										
										
											2018-02-02 02:47:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | 	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return fi, nil, nil, toObjectErr(reducedErr, bucket, object) | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | 	// List all online disks.
 | 
					
						
							|  |  |  | 	onlineDisks, modTime := listOnlineDisks(disks, metaArr, errs) | 
					
						
							| 
									
										
										
										
											2018-02-02 02:47:49 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Pick latest valid metadata.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	fi, err = pickValidFileInfo(ctx, metaArr, modTime, readQuorum) | 
					
						
							| 
									
										
										
										
											2016-05-25 16:33:39 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return fi, nil, nil, err | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	return fi, metaArr, onlineDisks, nil | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) getObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { | 
					
						
							|  |  |  | 	fi, _, _, err := er.getObjectFileInfo(ctx, bucket, object, opts) | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-07-03 07:17:27 +08:00
										 |  |  | 		return objInfo, toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2020-05-28 07:14:26 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if fi.Deleted { | 
					
						
							| 
									
										
										
										
											2020-06-17 23:33:14 +08:00
										 |  |  | 		objInfo = fi.ToObjectInfo(bucket, object) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		if opts.VersionID == "" { | 
					
						
							|  |  |  | 			return objInfo, toObjectErr(errFileNotFound, bucket, object) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// Make sure to return object info to provide extra information.
 | 
					
						
							| 
									
										
										
										
											2020-06-17 23:33:14 +08:00
										 |  |  | 		return objInfo, toObjectErr(errMethodNotAllowed, bucket, object) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return fi.ToObjectInfo(bucket, object), nil | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
										 |  |  | func undoRename(disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, errs []error) { | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | 	// Undo rename object on disks where RenameFile succeeded.
 | 
					
						
							| 
									
										
										
										
											2016-06-21 10:11:55 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// If srcEntry/dstEntry are objects then add a trailing slash to copy
 | 
					
						
							|  |  |  | 	// over all the parts inside the object directory
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
										 |  |  | 	if isDir { | 
					
						
							| 
									
										
										
										
											2016-06-21 10:11:55 +08:00
										 |  |  | 		srcEntry = retainSlash(srcEntry) | 
					
						
							|  |  |  | 		dstEntry = retainSlash(dstEntry) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	g := errgroup.WithNErrs(len(disks)) | 
					
						
							| 
									
										
										
										
											2016-07-12 13:53:54 +08:00
										 |  |  | 	for index, disk := range disks { | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | 		if disk == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			if errs[index] == nil { | 
					
						
							|  |  |  | 				_ = disks[index].RenameFile(dstBucket, dstEntry, srcBucket, srcEntry) | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	g.Wait() | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | // Similar to rename but renames data from srcEntry to dstEntry at dataDir
 | 
					
						
							|  |  |  | func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dataDir, dstBucket, dstEntry string, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) { | 
					
						
							|  |  |  | 	dataDir = retainSlash(dataDir) | 
					
						
							|  |  |  | 	g := errgroup.WithNErrs(len(disks)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Rename file on all underlying storage disks.
 | 
					
						
							|  |  |  | 	for index := range disks { | 
					
						
							|  |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			if disks[index] == nil { | 
					
						
							|  |  |  | 				return errDiskNotFound | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if err := disks[index].RenameData(srcBucket, srcEntry, dataDir, dstBucket, dstEntry); err != nil { | 
					
						
							|  |  |  | 				if !IsErrIgnored(err, ignoredErr...) { | 
					
						
							|  |  |  | 					return err | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Wait for all renames to finish.
 | 
					
						
							|  |  |  | 	errs := g.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum
 | 
					
						
							|  |  |  | 	// otherwise return failure. Cleanup successful renames.
 | 
					
						
							|  |  |  | 	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) | 
					
						
							|  |  |  | 	if err == errErasureWriteQuorum { | 
					
						
							|  |  |  | 		ug := errgroup.WithNErrs(len(disks)) | 
					
						
							|  |  |  | 		for index, disk := range disks { | 
					
						
							|  |  |  | 			if disk == nil { | 
					
						
							|  |  |  | 				continue | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			index := index | 
					
						
							|  |  |  | 			ug.Go(func() error { | 
					
						
							|  |  |  | 				// Undo all the partial rename operations.
 | 
					
						
							|  |  |  | 				if errs[index] == nil { | 
					
						
							|  |  |  | 					_ = disks[index].RenameData(dstBucket, dstEntry, dataDir, srcBucket, srcEntry) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 				return nil | 
					
						
							|  |  |  | 			}, index) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		ug.Wait() | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return evalDisks(disks, errs), err | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-21 10:11:55 +08:00
										 |  |  | // rename - common function that renamePart and renameObject use to rename
 | 
					
						
							|  |  |  | // the respective underlying storage layer representations.
 | 
					
						
							| 
									
										
										
										
											2018-04-12 08:15:42 +08:00
										 |  |  | func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, isDir bool, writeQuorum int, ignoredErr []error) ([]StorageAPI, error) { | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
										 |  |  | 	if isDir { | 
					
						
							| 
									
										
										
										
											2016-06-21 10:11:55 +08:00
										 |  |  | 		dstEntry = retainSlash(dstEntry) | 
					
						
							|  |  |  | 		srcEntry = retainSlash(srcEntry) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	g := errgroup.WithNErrs(len(disks)) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	// Rename file on all underlying storage disks.
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	for index := range disks { | 
					
						
							|  |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			if disks[index] == nil { | 
					
						
							|  |  |  | 				return errDiskNotFound | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if err := disks[index].RenameFile(srcBucket, srcEntry, dstBucket, dstEntry); err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-12 08:15:42 +08:00
										 |  |  | 				if !IsErrIgnored(err, ignoredErr...) { | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 					return err | 
					
						
							| 
									
										
										
										
											2018-04-12 08:15:42 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | 	// Wait for all renames to finish.
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	errs := g.Wait() | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum
 | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	// otherwise return failure. Cleanup successful renames.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	if err == errErasureWriteQuorum { | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | 		// Undo all the partial rename operations.
 | 
					
						
							| 
									
										
										
										
											2016-12-27 08:29:26 +08:00
										 |  |  | 		undoRename(disks, srcBucket, srcEntry, dstBucket, dstEntry, isDir, errs) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-06-15 08:14:27 +08:00
										 |  |  | 	return evalDisks(disks, errs), err | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // PutObject - creates an object upon reading from the input stream
 | 
					
						
							|  |  |  | // until EOF, erasure codes the data across all disk and additionally
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | // writes `xl.meta` which carries the necessary metadata for future
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // object operations.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { | 
					
						
							| 
									
										
										
										
											2018-02-10 07:19:30 +08:00
										 |  |  | 	// Validate put object input args.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	if err = checkPutObjectArgs(ctx, bucket, object, er, data.Size()); err != nil { | 
					
						
							| 
									
										
										
										
											2018-02-10 07:19:30 +08:00
										 |  |  | 		return ObjectInfo{}, err | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-09-21 10:22:09 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	return er.putObject(ctx, bucket, object, data, opts) | 
					
						
							| 
									
										
										
										
											2018-01-13 12:34:52 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
// putObject wrapper for erasureObjects PutObject.
//
// Flow: allocate a unique temp object under minioMetaTmpBucket, erasure-
// encode the stream into per-disk part files with bitrot protection, write
// per-disk `xl.meta`, then atomically rename temp -> final location. A
// deferred delete cleans up the temp object on any failure path; after a
// successful rename there is nothing left to delete.
func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	// Notify listeners/cache of the (possibly) updated object path,
	// regardless of success or failure.
	defer ObjectPathUpdated(path.Join(bucket, object))

	data := r.Reader

	// Temp object name under minioMetaTmpBucket; also the root of the
	// temporary erasure part path below.
	uniqueID := mustGetUUID()
	tempObj := uniqueID
	// No metadata is set, allocate a new one.
	if opts.UserDefined == nil {
		opts.UserDefined = make(map[string]string)
	}

	storageDisks := er.getDisks()

	// Get parity and data drive count based on storage class metadata.
	parityDrives := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
	if parityDrives == 0 {
		parityDrives = getDefaultParityBlocks(len(storageDisks))
	}
	dataDrives := len(storageDisks) - parityDrives

	// writeQuorum is dataDrives, bumped to dataDrives+1 when data and
	// parity counts are equal (otherwise a split-brain write could
	// satisfy quorum on both halves).
	writeQuorum := dataDrives
	if dataDrives == parityDrives {
		writeQuorum = dataDrives + 1
	}

	// Delete temporary object in the event of failure.
	// If PutObject succeeded there would be no temporary
	// object to delete.
	defer er.deleteObject(ctx, minioMetaTmpBucket, tempObj, writeQuorum)

	// This is a special case with size as '0' and object ends with
	// a slash separator, we treat it like a valid operation and
	// return success.
	if isObjectDir(object, data.Size()) {
		// Check if an object is present as one of the parent dir.
		// -- FIXME. (needs a new kind of lock).
		// -- FIXME (this also causes performance issue when disks are down).
		if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
			return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
		}

		if err = er.putObjectDir(ctx, bucket, object, writeQuorum); err != nil {
			return ObjectInfo{}, toObjectErr(err, bucket, object)
		}

		return dirObjectInfo(bucket, object, data.Size(), opts.UserDefined), nil
	}

	// Validate input data size; -1 means "unknown length" (streaming),
	// anything below that is invalid.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return ObjectInfo{}, toObjectErr(errInvalidArgument)
	}

	// Check if an object is present as one of the parent dir.
	// -- FIXME. (needs a new kind of lock).
	// -- FIXME (this also causes performance issue when disks are down).
	if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
		return ObjectInfo{}, toObjectErr(errFileParentIsFile, bucket, object)
	}

	// Initialize parts metadata, one FileInfo slot per disk.
	partsMetadata := make([]FileInfo, len(er.getDisks()))

	fi := newFileInfo(object, dataDrives, parityDrives)

	// Versioned buckets get a version ID: the caller-supplied one if
	// present, otherwise a freshly generated UUID.
	if opts.Versioned {
		fi.VersionID = opts.VersionID
		if fi.VersionID == "" {
			fi.VersionID = mustGetUUID()
		}
	}
	fi.DataDir = mustGetUUID()

	// Initialize erasure metadata: every disk starts from the same FileInfo.
	for index := range partsMetadata {
		partsMetadata[index] = fi
	}

	// Order disks according to erasure distribution.
	onlineDisks := shuffleDisks(storageDisks, fi.Erasure.Distribution)

	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Fetch buffer for I/O: pooled for large/unknown sizes, freshly
	// allocated (right-sized) for small payloads.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
	case size == -1 || size >= fi.Erasure.BlockSize:
		buffer = er.bp.Get()
		defer er.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate a full block-size buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

	// Pooled buffers may exceed the erasure block size; trim to one block.
	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}

	// Single-shot PUT always writes exactly one part.
	partName := "part.1"
	tempErasureObj := pathJoin(uniqueID, fi.DataDir, partName)

	// One bitrot-protected writer per online disk; offline disks keep
	// a nil writer slot.
	writers := make([]io.Writer, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
	}

	n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if erasureErr != nil {
		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		logger.LogIf(ctx, IncompleteBody{}, logger.Application)
		return ObjectInfo{}, IncompleteBody{}
	}

	// Record the part and its bitrot checksum on every disk whose
	// writer survived; mark the rest offline.
	for i, w := range writers {
		if w == nil {
			onlineDisks[i] = nil
			continue
		}
		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize())
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: 1,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(w),
		})
	}
	// Preserve a caller-supplied etag; otherwise use the MD5 computed
	// while streaming.
	if opts.UserDefined["etag"] == "" {
		opts.UserDefined["etag"] = r.MD5CurrentHexString()
	}

	// Guess content-type from the extension if possible.
	if opts.UserDefined["content-type"] == "" {
		opts.UserDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
	}

	// Honor a caller-supplied modification time (e.g. replication),
	// falling back to "now".
	modTime := opts.MTime
	if opts.MTime.IsZero() {
		modTime = UTCNow()
	}

	// Fill all the necessary metadata.
	// Update `xl.meta` content on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Metadata = opts.UserDefined
		partsMetadata[index].Size = n
		partsMetadata[index].ModTime = modTime
	}

	// Write unique `xl.meta` for each disk.
	if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, writeQuorum); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Rename the successfully written temporary object to final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
		return ObjectInfo{}, toObjectErr(err, bucket, object)
	}

	// Whether a disk was initially or becomes offline
	// during this upload, send it to the MRF (most-recently-failed)
	// list so the object gets healed later.
	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] == nil || storageDisks[i] == nil {
			er.addPartial(bucket, object, fi.VersionID)
			break
		}
	}

	// Object info is the same in all disks, so we can pick
	// the first meta from an online disk.
	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] == nil {
			continue
		}
		fi = partsMetadata[i]
		break
	}

	return fi.ToObjectInfo(bucket, object), nil
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object string, writeQuorum int, fi FileInfo) error { | 
					
						
							|  |  |  | 	disks := er.getDisks() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	g := errgroup.WithNErrs(len(disks)) | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	for index := range disks { | 
					
						
							|  |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			if disks[index] == nil { | 
					
						
							|  |  |  | 				return errDiskNotFound | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			err := disks[index].DeleteVersion(bucket, object, fi) | 
					
						
							|  |  |  | 			if err != nil && err != errVolumeNotFound { | 
					
						
							|  |  |  | 				return err | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							| 
									
										
										
										
											2016-09-03 03:18:35 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-01-17 11:23:43 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// return errors if any during deletion
 | 
					
						
							|  |  |  | 	return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // deleteObject - wrapper for delete object, deletes an object from
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | // all the disks in parallel, including `xl.meta` associated with the
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // object.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) deleteObject(ctx context.Context, bucket, object string, writeQuorum int) error { | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 	var disks []StorageAPI | 
					
						
							| 
									
										
										
										
											2018-01-30 10:43:13 +08:00
										 |  |  | 	var err error | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	defer ObjectPathUpdated(path.Join(bucket, object)) | 
					
						
							| 
									
										
										
										
											2018-02-21 07:33:26 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 	tmpObj := mustGetUUID() | 
					
						
							|  |  |  | 	if bucket == minioMetaTmpBucket { | 
					
						
							|  |  |  | 		tmpObj = object | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		disks = er.getDisks() | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 	} else { | 
					
						
							| 
									
										
										
										
											2018-10-05 08:22:49 +08:00
										 |  |  | 		// Rename the current object while requiring write quorum, but also consider
 | 
					
						
							|  |  |  | 		// that a non found object in a given disk as a success since it already
 | 
					
						
							|  |  |  | 		// confirms that the object doesn't have a part in that disk (already removed)
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		disks, err = rename(ctx, er.getDisks(), bucket, object, minioMetaTmpBucket, tmpObj, true, writeQuorum, | 
					
						
							|  |  |  | 			[]error{errFileNotFound}) | 
					
						
							| 
									
										
										
										
											2018-03-27 07:39:28 +08:00
										 |  |  | 		if err != nil { | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 			return toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2018-03-27 07:39:28 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2017-12-22 19:28:13 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	g := errgroup.WithNErrs(len(disks)) | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	for index := range disks { | 
					
						
							|  |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			if disks[index] == nil { | 
					
						
							|  |  |  | 				return errDiskNotFound | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			err := cleanupDir(ctx, disks[index], minioMetaTmpBucket, tmpObj) | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 			if err != nil && err != errVolumeNotFound { | 
					
						
							|  |  |  | 				return err | 
					
						
							| 
									
										
										
										
											2016-05-26 05:32:49 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 			return nil | 
					
						
							|  |  |  | 		}, index) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-03-27 07:39:28 +08:00
										 |  |  | 	// return errors if any during deletion
 | 
					
						
							| 
									
										
										
										
											2019-10-15 00:44:51 +08:00
										 |  |  | 	return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | // DeleteObjects deletes objects/versions in bulk, this function will still automatically split objects list
 | 
					
						
							|  |  |  | // into smaller bulks if some object names are found to be duplicated in the delete list, splitting
 | 
					
						
							|  |  |  | // into smaller bulks will avoid holding twice the write lock of the duplicated object names.
 | 
					
						
							|  |  |  | func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { | 
					
						
							|  |  |  | 	errs := make([]error, len(objects)) | 
					
						
							|  |  |  | 	dobjects := make([]DeletedObject, len(objects)) | 
					
						
							|  |  |  | 	writeQuorums := make([]int, len(objects)) | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	for i, object := range objects { | 
					
						
							|  |  |  | 		errs[i] = checkDelObjArgs(ctx, bucket, object.ObjectName) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	storageDisks := er.getDisks() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for i := range objects { | 
					
						
							|  |  |  | 		if errs[i] != nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		// Assume (N/2 + 1) quorums for all objects
 | 
					
						
							|  |  |  | 		// this is a theoretical assumption such that
 | 
					
						
							|  |  |  | 		// for delete's we do not need to honor storage
 | 
					
						
							|  |  |  | 		// class for objects which have reduced quorum
 | 
					
						
							|  |  |  | 		// storage class only needs to be honored for
 | 
					
						
							|  |  |  | 		// Read() requests alone which we already do.
 | 
					
						
							|  |  |  | 		writeQuorums[i] = getWriteQuorum(len(storageDisks)) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	versions := make([]FileInfo, len(objects)) | 
					
						
							|  |  |  | 	for i := range objects { | 
					
						
							|  |  |  | 		if objects[i].VersionID == "" { | 
					
						
							|  |  |  | 			if opts.Versioned && !HasSuffix(objects[i].ObjectName, SlashSeparator) { | 
					
						
							|  |  |  | 				versions[i] = FileInfo{ | 
					
						
							|  |  |  | 					Name:      objects[i].ObjectName, | 
					
						
							|  |  |  | 					VersionID: mustGetUUID(), | 
					
						
							|  |  |  | 					ModTime:   UTCNow(), | 
					
						
							|  |  |  | 					Deleted:   true, // delete marker
 | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 				continue | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		versions[i] = FileInfo{ | 
					
						
							|  |  |  | 			Name:      objects[i].ObjectName, | 
					
						
							|  |  |  | 			VersionID: objects[i].VersionID, | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-03-07 05:44:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 	// Initialize list of errors.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	var opErrs = make([]error, len(storageDisks)) | 
					
						
							|  |  |  | 	var delObjErrs = make([][]error, len(storageDisks)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	var wg sync.WaitGroup | 
					
						
							|  |  |  | 	// Remove versions in bulk for each disk
 | 
					
						
							|  |  |  | 	for index, disk := range storageDisks { | 
					
						
							|  |  |  | 		if disk == nil { | 
					
						
							|  |  |  | 			opErrs[index] = errDiskNotFound | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		go func(index int, disk StorageAPI) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			delObjErrs[index] = disk.DeleteVersions(bucket, versions) | 
					
						
							|  |  |  | 		}(index, disk) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 	// Reduce errors for each object
 | 
					
						
							|  |  |  | 	for objIndex := range objects { | 
					
						
							|  |  |  | 		if errs[objIndex] != nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		diskErrs := make([]error, len(storageDisks)) | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 		// Iterate over disks to fetch the error
 | 
					
						
							|  |  |  | 		// of deleting of the current object
 | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		for i := range delObjErrs { | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 			// delObjErrs[i] is not nil when disks[i] is also not nil
 | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 			if delObjErrs[i] != nil { | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 				if delObjErrs[i][objIndex] != errFileNotFound { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 					diskErrs[i] = delObjErrs[i][objIndex] | 
					
						
							| 
									
										
										
										
											2020-03-11 23:56:36 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		errs[objIndex] = reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex]) | 
					
						
							|  |  |  | 		if errs[objIndex] == nil { | 
					
						
							|  |  |  | 			if versions[objIndex].Deleted { | 
					
						
							|  |  |  | 				dobjects[objIndex] = DeletedObject{ | 
					
						
							|  |  |  | 					DeleteMarker:          versions[objIndex].Deleted, | 
					
						
							|  |  |  | 					DeleteMarkerVersionID: versions[objIndex].VersionID, | 
					
						
							|  |  |  | 					ObjectName:            versions[objIndex].Name, | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} else { | 
					
						
							|  |  |  | 				dobjects[objIndex] = DeletedObject{ | 
					
						
							|  |  |  | 					ObjectName: versions[objIndex].Name, | 
					
						
							|  |  |  | 					VersionID:  versions[objIndex].VersionID, | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-30 04:07:26 +08:00
										 |  |  | 	// Check failed deletes across multiple objects
 | 
					
						
							|  |  |  | 	for _, version := range versions { | 
					
						
							|  |  |  | 		// Check if there is any offline disk and add it to the MRF list
 | 
					
						
							|  |  |  | 		for _, disk := range storageDisks { | 
					
						
							|  |  |  | 			if disk == nil { | 
					
						
							|  |  |  | 				// ignore delete markers for quorum
 | 
					
						
							|  |  |  | 				if version.Deleted { | 
					
						
							|  |  |  | 					continue | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 				// all other direct versionId references we should
 | 
					
						
							|  |  |  | 				// ensure no dangling file is left over.
 | 
					
						
							|  |  |  | 				er.addPartial(bucket, version.Name, version.VersionID) | 
					
						
							|  |  |  | 				break | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	return dobjects, errs | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-02 07:43:31 +08:00
										 |  |  | // DeleteObject - deletes an object, this call doesn't necessary reply
 | 
					
						
							|  |  |  | // any error as it is not necessary for the handler to reply back a
 | 
					
						
							|  |  |  | // response to the client request.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if err = checkDelObjArgs(ctx, bucket, object); err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return objInfo, err | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-09-01 02:39:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	storageDisks := er.getDisks() | 
					
						
							|  |  |  | 	writeQuorum := len(storageDisks)/2 + 1 | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	if opts.VersionID == "" { | 
					
						
							|  |  |  | 		if opts.Versioned && !HasSuffix(object, SlashSeparator) { | 
					
						
							|  |  |  | 			fi := FileInfo{ | 
					
						
							|  |  |  | 				Name:      object, | 
					
						
							|  |  |  | 				VersionID: mustGetUUID(), | 
					
						
							|  |  |  | 				Deleted:   true, | 
					
						
							|  |  |  | 				ModTime:   UTCNow(), | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			// Add delete marker, since we don't have any version specified explicitly.
 | 
					
						
							|  |  |  | 			if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, fi); err != nil { | 
					
						
							|  |  |  | 				return objInfo, toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			return fi.ToObjectInfo(bucket, object), nil | 
					
						
							| 
									
										
										
										
											2018-09-01 04:16:35 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2018-08-30 04:36:19 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-06-17 13:18:43 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// Delete the object version on all disks.
 | 
					
						
							|  |  |  | 	if err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{ | 
					
						
							|  |  |  | 		Name:      object, | 
					
						
							|  |  |  | 		VersionID: opts.VersionID, | 
					
						
							|  |  |  | 	}); err != nil { | 
					
						
							|  |  |  | 		return objInfo, toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2016-06-08 02:35:03 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-30 04:07:26 +08:00
										 |  |  | 	for _, disk := range storageDisks { | 
					
						
							|  |  |  | 		if disk == nil { | 
					
						
							|  |  |  | 			er.addPartial(bucket, object, opts.VersionID) | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	return ObjectInfo{Bucket: bucket, Name: object, VersionID: opts.VersionID}, nil | 
					
						
							| 
									
										
										
										
											2018-02-10 07:19:30 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2020-01-16 10:30:32 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-30 04:07:26 +08:00
										 |  |  | // Send the successful but partial upload/delete, however ignore
 | 
					
						
							| 
									
										
										
										
											2020-01-16 10:30:32 +08:00
										 |  |  | // if the channel is blocked by other items.
 | 
					
						
							| 
									
										
										
										
											2020-06-30 04:07:26 +08:00
										 |  |  | func (er erasureObjects) addPartial(bucket, object, versionID string) { | 
					
						
							| 
									
										
										
										
											2020-01-16 10:30:32 +08:00
										 |  |  | 	select { | 
					
						
							| 
									
										
										
										
											2020-06-30 04:07:26 +08:00
										 |  |  | 	case er.mrfOpCh <- partialOperation{bucket: bucket, object: object, versionID: versionID}: | 
					
						
							| 
									
										
										
										
											2020-01-16 10:30:32 +08:00
										 |  |  | 	default: | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2020-01-21 00:45:59 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-24 02:09:35 +08:00
// PutObjectTags - replace or add tags to an existing object
//
// Reads the object's metadata from all disks, replaces the tagging entry
// in every readable copy, then writes the updated `xl.meta` to a tmp
// location and atomically renames it into place, both under write quorum.
// An empty `tags` string removes the tagging entry entirely (this is how
// DeleteObjectTags is implemented).
//
// NOTE(review): the `tags` parameter shadows the imported `tags` package
// within this function.
func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
	disks := er.getDisks()

	// Read metadata associated with the object from all disks.
	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, opts.VersionID)

	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, er, metaArr, errs)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	// List all online disks.
	_, modTime := listOnlineDisks(disks, metaArr, errs)

	// Pick latest valid metadata.
	fi, err := pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	// Tagging a delete marker is not allowed: report "not found" when no
	// version was named, "method not allowed" for an explicit version.
	if fi.Deleted {
		if opts.VersionID == "" {
			return toObjectErr(errFileNotFound, bucket, object)
		}
		return toObjectErr(errMethodNotAllowed, bucket, object)
	}

	// Update the tagging entry in every copy of the metadata that was
	// read successfully. Note: `fi` here shadows the outer `fi`; its
	// Metadata map is shared with metaArr[i], so the delete below
	// mutates the array entry directly.
	for i, fi := range metaArr {
		if errs[i] != nil {
			// Avoid disks where loading metadata fail
			continue
		}

		// clean fi.Meta of tag key, before updating the new tags
		delete(fi.Metadata, xhttp.AmzObjectTagging)
		// Don't update for empty tags
		if tags != "" {
			fi.Metadata[xhttp.AmzObjectTagging] = tags
		}
		metaArr[i].Metadata = fi.Metadata
	}

	tempObj := mustGetUUID()

	// Write unique `xl.meta` for each disk.
	if disks, err = writeUniqueFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
		return toObjectErr(err, bucket, object)
	}

	// Atomically rename metadata from tmp location to destination for each disk.
	if _, err = renameFileInfo(ctx, disks, minioMetaTmpBucket, tempObj, bucket, object, writeQuorum); err != nil {
		return toObjectErr(err, bucket, object)
	}

	return nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-24 02:09:35 +08:00
// DeleteObjectTags - delete object tags from an existing object
//
// Implemented as PutObjectTags with an empty tag string, which removes
// the tagging entry from the object metadata.
func (er erasureObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
	return er.PutObjectTags(ctx, bucket, object, "", opts)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-24 02:09:35 +08:00
										 |  |  | // GetObjectTags - get object tags from an existing object
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { | 
					
						
							| 
									
										
										
										
											2020-01-21 00:45:59 +08:00
										 |  |  | 	// GetObjectInfo will return tag value as well
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	oi, err := er.GetObjectInfo(ctx, bucket, object, opts) | 
					
						
							| 
									
										
										
										
											2020-01-21 00:45:59 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-05-06 05:18:13 +08:00
										 |  |  | 		return nil, err | 
					
						
							| 
									
										
										
										
											2020-01-21 00:45:59 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-06 05:18:13 +08:00
										 |  |  | 	return tags.ParseObjectTags(oi.UserTags) | 
					
						
							| 
									
										
										
										
											2020-01-21 00:45:59 +08:00
										 |  |  | } |