| 
									
										
										
										
											2021-04-19 03:41:13 +08:00
										 |  |  | // Copyright (c) 2015-2021 MinIO, Inc.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This file is part of MinIO Object Storage stack
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is free software: you can redistribute it and/or modify
 | 
					
						
							|  |  |  | // it under the terms of the GNU Affero General Public License as published by
 | 
					
						
							|  |  |  | // the Free Software Foundation, either version 3 of the License, or
 | 
					
						
							|  |  |  | // (at your option) any later version.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is distributed in the hope that it will be useful
 | 
					
						
							|  |  |  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
					
						
							|  |  |  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
					
						
							|  |  |  | // GNU Affero General Public License for more details.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // You should have received a copy of the GNU Affero General Public License
 | 
					
						
							|  |  |  | // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | package cmd | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							|  |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2021-02-25 01:00:15 +08:00
										 |  |  | 	"crypto/sha256" | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	"encoding/hex" | 
					
						
							|  |  |  | 	"fmt" | 
					
						
							|  |  |  | 	"net/http" | 
					
						
							|  |  |  | 	"sort" | 
					
						
							|  |  |  | 	"time" | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	xhttp "github.com/minio/minio/cmd/http" | 
					
						
							|  |  |  | 	"github.com/minio/minio/cmd/logger" | 
					
						
							| 
									
										
										
										
											2020-07-22 08:49:56 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/bucket/replication" | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/sync/errgroup" | 
					
						
							|  |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
// erasureAlgorithm is the erasure-coding algorithm name recorded in object
// metadata: Reed-Solomon with a Vandermonde coding matrix.
const erasureAlgorithm = "rs-vandermonde"
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // byObjectPartNumber is a collection satisfying sort.Interface.
 | 
					
						
							|  |  |  | type byObjectPartNumber []ObjectPartInfo | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (t byObjectPartNumber) Len() int           { return len(t) } | 
					
						
							|  |  |  | func (t byObjectPartNumber) Swap(i, j int)      { t[i], t[j] = t[j], t[i] } | 
					
						
							|  |  |  | func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // AddChecksumInfo adds a checksum of a part.
 | 
					
						
							|  |  |  | func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) { | 
					
						
							|  |  |  | 	for i, sum := range e.Checksums { | 
					
						
							|  |  |  | 		if sum.PartNumber == ckSumInfo.PartNumber { | 
					
						
							|  |  |  | 			e.Checksums[i] = ckSumInfo | 
					
						
							|  |  |  | 			return | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	e.Checksums = append(e.Checksums, ckSumInfo) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // GetChecksumInfo - get checksum of a part.
 | 
					
						
							|  |  |  | func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) { | 
					
						
							|  |  |  | 	for _, sum := range e.Checksums { | 
					
						
							|  |  |  | 		if sum.PartNumber == partNumber { | 
					
						
							|  |  |  | 			// Return the checksum
 | 
					
						
							|  |  |  | 			return sum | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return ChecksumInfo{} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // ShardFileSize - returns final erasure size from original size.
 | 
					
						
							|  |  |  | func (e ErasureInfo) ShardFileSize(totalLength int64) int64 { | 
					
						
							|  |  |  | 	if totalLength == 0 { | 
					
						
							|  |  |  | 		return 0 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if totalLength == -1 { | 
					
						
							|  |  |  | 		return -1 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	numShards := totalLength / e.BlockSize | 
					
						
							|  |  |  | 	lastBlockSize := totalLength % e.BlockSize | 
					
						
							|  |  |  | 	lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks)) | 
					
						
							|  |  |  | 	return numShards*e.ShardSize() + lastShardSize | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // ShardSize - returns actual shared size from erasure blockSize.
 | 
					
						
							|  |  |  | func (e ErasureInfo) ShardSize() int64 { | 
					
						
							|  |  |  | 	return ceilFrac(e.BlockSize, int64(e.DataBlocks)) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // IsValid - tells if erasure info fields are valid.
 | 
					
						
							|  |  |  | func (fi FileInfo) IsValid() bool { | 
					
						
							|  |  |  | 	if fi.Deleted { | 
					
						
							|  |  |  | 		// Delete marker has no data, no need to check
 | 
					
						
							|  |  |  | 		// for erasure coding information
 | 
					
						
							|  |  |  | 		return true | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	dataBlocks := fi.Erasure.DataBlocks | 
					
						
							|  |  |  | 	parityBlocks := fi.Erasure.ParityBlocks | 
					
						
							| 
									
										
										
										
											2020-10-29 10:24:01 +08:00
										 |  |  | 	correctIndexes := (fi.Erasure.Index > 0 && | 
					
						
							|  |  |  | 		fi.Erasure.Index <= dataBlocks+parityBlocks && | 
					
						
							|  |  |  | 		len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks)) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	return ((dataBlocks >= parityBlocks) && | 
					
						
							| 
									
										
										
										
											2020-10-27 07:19:42 +08:00
										 |  |  | 		(dataBlocks != 0) && (parityBlocks != 0) && | 
					
						
							| 
									
										
										
										
											2020-10-29 10:24:01 +08:00
										 |  |  | 		correctIndexes) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // ToObjectInfo - Converts metadata to object info.
 | 
					
						
							|  |  |  | func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo { | 
					
						
							| 
									
										
										
										
											2020-09-19 23:39:41 +08:00
										 |  |  | 	object = decodeDirObject(object) | 
					
						
							| 
									
										
										
										
											2020-09-17 01:21:50 +08:00
										 |  |  | 	versionID := fi.VersionID | 
					
						
							| 
									
										
										
										
											2021-02-25 10:39:10 +08:00
										 |  |  | 	if (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) && versionID == "" { | 
					
						
							| 
									
										
										
										
											2020-09-17 01:21:50 +08:00
										 |  |  | 		versionID = nullVersionID | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	objInfo := ObjectInfo{ | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 		IsDir:            HasSuffix(object, SlashSeparator), | 
					
						
							|  |  |  | 		Bucket:           bucket, | 
					
						
							|  |  |  | 		Name:             object, | 
					
						
							|  |  |  | 		VersionID:        versionID, | 
					
						
							|  |  |  | 		IsLatest:         fi.IsLatest, | 
					
						
							|  |  |  | 		DeleteMarker:     fi.Deleted, | 
					
						
							|  |  |  | 		Size:             fi.Size, | 
					
						
							|  |  |  | 		ModTime:          fi.ModTime, | 
					
						
							|  |  |  | 		Legacy:           fi.XLV1, | 
					
						
							|  |  |  | 		ContentType:      fi.Metadata["content-type"], | 
					
						
							|  |  |  | 		ContentEncoding:  fi.Metadata["content-encoding"], | 
					
						
							|  |  |  | 		NumVersions:      fi.NumVersions, | 
					
						
							|  |  |  | 		SuccessorModTime: fi.SuccessorModTime, | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// Update expires
 | 
					
						
							|  |  |  | 	var ( | 
					
						
							|  |  |  | 		t time.Time | 
					
						
							|  |  |  | 		e error | 
					
						
							|  |  |  | 	) | 
					
						
							|  |  |  | 	if exp, ok := fi.Metadata["expires"]; ok { | 
					
						
							|  |  |  | 		if t, e = time.Parse(http.TimeFormat, exp); e == nil { | 
					
						
							|  |  |  | 			objInfo.Expires = t.UTC() | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	objInfo.backendType = BackendErasure | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Extract etag from metadata.
 | 
					
						
							|  |  |  | 	objInfo.ETag = extractETag(fi.Metadata) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Add user tags to the object info
 | 
					
						
							| 
									
										
										
										
											2021-04-11 00:13:12 +08:00
										 |  |  | 	tags := fi.Metadata[xhttp.AmzObjectTagging] | 
					
						
							|  |  |  | 	if len(tags) != 0 { | 
					
						
							|  |  |  | 		objInfo.UserTags = tags | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-07-22 08:49:56 +08:00
										 |  |  | 	// Add replication status to the object info
 | 
					
						
							|  |  |  | 	objInfo.ReplicationStatus = replication.StatusType(fi.Metadata[xhttp.AmzBucketReplicationStatus]) | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 	if fi.Deleted { | 
					
						
							|  |  |  | 		objInfo.ReplicationStatus = replication.StatusType(fi.DeleteMarkerReplicationStatus) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	objInfo.TransitionStatus = fi.TransitionStatus | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	objInfo.transitionedObjName = fi.TransitionedObjName | 
					
						
							|  |  |  | 	objInfo.TransitionTier = fi.TransitionTier | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// etag/md5Sum has already been extracted. We need to
 | 
					
						
							|  |  |  | 	// remove to avoid it from appearing as part of
 | 
					
						
							|  |  |  | 	// response headers. e.g, X-Minio-* or X-Amz-*.
 | 
					
						
							|  |  |  | 	// Tags have also been extracted, we remove that as well.
 | 
					
						
							|  |  |  | 	objInfo.UserDefined = cleanMetadata(fi.Metadata) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// All the parts per object.
 | 
					
						
							|  |  |  | 	objInfo.Parts = fi.Parts | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Update storage class
 | 
					
						
							|  |  |  | 	if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok { | 
					
						
							|  |  |  | 		objInfo.StorageClass = sc | 
					
						
							|  |  |  | 	} else { | 
					
						
							|  |  |  | 		objInfo.StorageClass = globalMinioDefaultStorageClass | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 	objInfo.VersionPurgeStatus = fi.VersionPurgeStatus | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 	// set restore status for transitioned object
 | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	restoreHdr, ok := fi.Metadata[xhttp.AmzRestore] | 
					
						
							|  |  |  | 	if ok { | 
					
						
							|  |  |  | 		if restoreStatus, err := parseRestoreObjStatus(restoreHdr); err == nil { | 
					
						
							|  |  |  | 			objInfo.RestoreOngoing = restoreStatus.Ongoing() | 
					
						
							|  |  |  | 			objInfo.RestoreExpires, _ = restoreStatus.Expiry() | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// Success.
 | 
					
						
							|  |  |  | 	return objInfo | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // objectPartIndex - returns the index of matching object part number.
 | 
					
						
							|  |  |  | func objectPartIndex(parts []ObjectPartInfo, partNumber int) int { | 
					
						
							|  |  |  | 	for i, part := range parts { | 
					
						
							|  |  |  | 		if partNumber == part.Number { | 
					
						
							|  |  |  | 			return i | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return -1 | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // AddObjectPart - add a new object part in order.
 | 
					
						
							|  |  |  | func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) { | 
					
						
							|  |  |  | 	partInfo := ObjectPartInfo{ | 
					
						
							|  |  |  | 		Number:     partNumber, | 
					
						
							|  |  |  | 		ETag:       partETag, | 
					
						
							|  |  |  | 		Size:       partSize, | 
					
						
							|  |  |  | 		ActualSize: actualSize, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Update part info if it already exists.
 | 
					
						
							|  |  |  | 	for i, part := range fi.Parts { | 
					
						
							|  |  |  | 		if partNumber == part.Number { | 
					
						
							|  |  |  | 			fi.Parts[i] = partInfo | 
					
						
							|  |  |  | 			return | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Proceed to include new part info.
 | 
					
						
							|  |  |  | 	fi.Parts = append(fi.Parts, partInfo) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Parts in FileInfo should be in sorted order by part number.
 | 
					
						
							|  |  |  | 	sort.Sort(byObjectPartNumber(fi.Parts)) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // ObjectToPartOffset - translate offset of an object to offset of its individual part.
 | 
					
						
							|  |  |  | func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) { | 
					
						
							|  |  |  | 	if offset == 0 { | 
					
						
							|  |  |  | 		// Special case - if offset is 0, then partIndex and partOffset are always 0.
 | 
					
						
							|  |  |  | 		return 0, 0, nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	partOffset = offset | 
					
						
							|  |  |  | 	// Seek until object offset maps to a particular part offset.
 | 
					
						
							|  |  |  | 	for i, part := range fi.Parts { | 
					
						
							|  |  |  | 		partIndex = i | 
					
						
							|  |  |  | 		// Offset is smaller than size we have reached the proper part offset.
 | 
					
						
							|  |  |  | 		if partOffset < part.Size { | 
					
						
							|  |  |  | 			return partIndex, partOffset, nil | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// Continue to towards the next part.
 | 
					
						
							|  |  |  | 		partOffset -= part.Size | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	logger.LogIf(ctx, InvalidRange{}) | 
					
						
							|  |  |  | 	// Offset beyond the size of the object return InvalidRange.
 | 
					
						
							|  |  |  | 	return 0, 0, InvalidRange{} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-25 09:31:56 +08:00
										 |  |  | func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) { | 
					
						
							|  |  |  | 	// with less quorum return error.
 | 
					
						
							|  |  |  | 	if quorum < 2 { | 
					
						
							|  |  |  | 		return FileInfo{}, errErasureReadQuorum | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	metaHashes := make([]string, len(metaArr)) | 
					
						
							| 
									
										
										
										
											2020-10-28 15:09:15 +08:00
										 |  |  | 	h := sha256.New() | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	for i, meta := range metaArr { | 
					
						
							| 
									
										
										
										
											2021-04-22 10:06:08 +08:00
										 |  |  | 		if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			for _, part := range meta.Parts { | 
					
						
							|  |  |  | 				h.Write([]byte(fmt.Sprintf("part.%d", part.Number))) | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-10-28 15:09:15 +08:00
										 |  |  | 			h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution))) | 
					
						
							| 
									
										
										
										
											2021-04-22 10:06:08 +08:00
										 |  |  | 			// make sure that length of Data is same
 | 
					
						
							|  |  |  | 			h.Write([]byte(fmt.Sprintf("%v", len(meta.Data)))) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			metaHashes[i] = hex.EncodeToString(h.Sum(nil)) | 
					
						
							| 
									
										
										
										
											2020-10-28 15:09:15 +08:00
										 |  |  | 			h.Reset() | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	metaHashCountMap := make(map[string]int) | 
					
						
							|  |  |  | 	for _, hash := range metaHashes { | 
					
						
							|  |  |  | 		if hash == "" { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		metaHashCountMap[hash]++ | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	maxHash := "" | 
					
						
							|  |  |  | 	maxCount := 0 | 
					
						
							|  |  |  | 	for hash, count := range metaHashCountMap { | 
					
						
							|  |  |  | 		if count > maxCount { | 
					
						
							|  |  |  | 			maxCount = count | 
					
						
							|  |  |  | 			maxHash = hash | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if maxCount < quorum { | 
					
						
							|  |  |  | 		return FileInfo{}, errErasureReadQuorum | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for i, hash := range metaHashes { | 
					
						
							|  |  |  | 		if hash == maxHash { | 
					
						
							| 
									
										
										
										
											2021-05-25 09:31:56 +08:00
										 |  |  | 			if metaArr[i].IsValid() { | 
					
						
							|  |  |  | 				return metaArr[i], nil | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return FileInfo{}, errErasureReadQuorum | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // pickValidFileInfo - picks one valid FileInfo content and returns from a
 | 
					
						
							|  |  |  | // slice of FileInfo.
 | 
					
						
							| 
									
										
										
										
											2021-05-25 09:31:56 +08:00
										 |  |  | func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, dataDir string, quorum int) (FileInfo, error) { | 
					
						
							| 
									
										
										
										
											2021-04-22 10:06:08 +08:00
										 |  |  | 	return findFileInfoInQuorum(ctx, metaArr, modTime, dataDir, quorum) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
 | 
					
						
							|  |  |  | func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) { | 
					
						
							|  |  |  | 	g := errgroup.WithNErrs(len(disks)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Start writing `xl.meta` to all disks in parallel.
 | 
					
						
							|  |  |  | 	for index := range disks { | 
					
						
							|  |  |  | 		index := index | 
					
						
							|  |  |  | 		g.Go(func() error { | 
					
						
							|  |  |  | 			if disks[index] == nil { | 
					
						
							|  |  |  | 				return errDiskNotFound | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			// Pick one FileInfo for a disk at index.
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 			fi := files[index] | 
					
						
							|  |  |  | 			fi.Erasure.Index = index + 1 | 
					
						
							|  |  |  | 			if fi.IsValid() { | 
					
						
							|  |  |  | 				return disks[index].WriteMetadata(ctx, bucket, prefix, fi) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			return errCorruptedFormat | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		}, index) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Wait for all the routines.
 | 
					
						
							|  |  |  | 	mErrs := g.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) | 
					
						
							|  |  |  | 	return evalDisks(disks, mErrs), err | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Returns per object readQuorum and writeQuorum
 | 
					
						
							|  |  |  | // readQuorum is the min required disks to read data.
 | 
					
						
							|  |  |  | // writeQuorum is the min required disks to write data.
 | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
 | 
					
						
							|  |  |  | 	latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return 0, 0, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-25 09:31:56 +08:00
										 |  |  | 	if !latestFileInfo.IsValid() { | 
					
						
							|  |  |  | 		return 0, 0, errErasureReadQuorum | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parityBlocks := globalStorageClass.GetParityForSC(latestFileInfo.Metadata[xhttp.AmzStorageClass]) | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | 	if parityBlocks <= 0 { | 
					
						
							|  |  |  | 		parityBlocks = defaultParityCount | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-25 09:31:56 +08:00
										 |  |  | 	dataBlocks := len(partsMetaData) - parityBlocks | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	writeQuorum := dataBlocks | 
					
						
							|  |  |  | 	if dataBlocks == parityBlocks { | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | 		writeQuorum++ | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks
 | 
					
						
							|  |  |  | 	// from latestFileInfo to get the quorum
 | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	return dataBlocks, writeQuorum, nil | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | } |