| 
									
										
										
										
											2016-05-31 07:51:59 +08:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2019-04-10 02:39:42 +08:00
										 |  |  |  * MinIO Cloud Storage, (C) 2016 MinIO, Inc. | 
					
						
							| 
									
										
										
										
											2016-05-31 07:51:59 +08:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Licensed under the Apache License, Version 2.0 (the "License"); | 
					
						
							|  |  |  |  * you may not use this file except in compliance with the License. | 
					
						
							|  |  |  |  * You may obtain a copy of the License at | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Unless required by applicable law or agreed to in writing, software | 
					
						
							|  |  |  |  * distributed under the License is distributed on an "AS IS" BASIS, | 
					
						
							|  |  |  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
					
						
							|  |  |  |  * See the License for the specific language governing permissions and | 
					
						
							|  |  |  |  * limitations under the License. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-19 07:23:42 +08:00
										 |  |  | package cmd | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2017-02-01 07:34:49 +08:00
										 |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2016-07-08 22:33:21 +08:00
										 |  |  | 	"hash/crc32" | 
					
						
							| 
									
										
										
										
											2016-06-03 07:34:15 +08:00
										 |  |  | 	"path" | 
					
						
							| 
									
										
										
										
											2016-07-27 02:34:48 +08:00
										 |  |  | 	"sync" | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	"time" | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 	jsoniter "github.com/json-iterator/go" | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/logger" | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	"github.com/tidwall/gjson" | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-14 02:56:25 +08:00
										 |  |  | // Returns number of errors that occurred the most (incl. nil) and the
 | 
					
						
							| 
									
										
										
										
											2017-08-15 09:09:29 +08:00
										 |  |  | // corresponding error value. NB When there is more than one error value that
 | 
					
						
							| 
									
										
										
										
											2016-07-14 02:56:25 +08:00
										 |  |  | // occurs maximum number of times, the error value returned depends on how
 | 
					
						
							|  |  |  | // golang's map orders keys. This doesn't affect correctness as long as quorum
 | 
					
						
							|  |  |  | // value is greater than or equal to simple majority, since none of the equally
 | 
					
						
							|  |  |  | // maximal values would occur quorum or more number of times.
 | 
					
						
							| 
									
										
										
										
											2016-11-21 17:47:26 +08:00
										 |  |  | func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) { | 
					
						
							| 
									
										
										
										
											2016-07-14 02:56:25 +08:00
										 |  |  | 	errorCounts := make(map[error]int) | 
					
						
							| 
									
										
										
										
											2016-07-10 04:01:32 +08:00
										 |  |  | 	for _, err := range errs { | 
					
						
							| 
									
										
										
										
											2018-04-11 00:36:37 +08:00
										 |  |  | 		if IsErrIgnored(err, ignoredErrs...) { | 
					
						
							| 
									
										
										
										
											2016-07-20 10:24:32 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2016-07-14 02:56:25 +08:00
										 |  |  | 		errorCounts[err]++ | 
					
						
							| 
									
										
										
										
											2016-07-10 04:01:32 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-03-23 01:15:16 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-10 04:01:32 +08:00
										 |  |  | 	max := 0 | 
					
						
							| 
									
										
										
										
											2016-07-14 02:56:25 +08:00
										 |  |  | 	for err, count := range errorCounts { | 
					
						
							| 
									
										
										
										
											2017-03-23 01:15:16 +08:00
										 |  |  | 		switch { | 
					
						
							|  |  |  | 		case max < count: | 
					
						
							| 
									
										
										
										
											2016-07-14 02:56:25 +08:00
										 |  |  | 			max = count | 
					
						
							| 
									
										
										
										
											2016-11-21 17:47:26 +08:00
										 |  |  | 			maxErr = err | 
					
						
							| 
									
										
										
										
											2017-03-23 01:15:16 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		// Prefer `nil` over other error values with the same
 | 
					
						
							|  |  |  | 		// number of occurrences.
 | 
					
						
							|  |  |  | 		case max == count && err == nil: | 
					
						
							|  |  |  | 			maxErr = err | 
					
						
							| 
									
										
										
										
											2016-07-10 04:01:32 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-11-21 17:47:26 +08:00
										 |  |  | 	return max, maxErr | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // reduceQuorumErrs behaves like reduceErrs by only for returning
 | 
					
						
							|  |  |  | // values of maximally occurring errors validated against a generic
 | 
					
						
							| 
									
										
										
										
											2017-08-15 09:09:29 +08:00
										 |  |  | // quorum number that can be read or write quorum depending on usage.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error { | 
					
						
							|  |  |  | 	maxCount, maxErr := reduceErrs(errs, ignoredErrs) | 
					
						
							|  |  |  | 	if maxCount >= quorum { | 
					
						
							|  |  |  | 		return maxErr | 
					
						
							| 
									
										
										
										
											2016-11-21 17:47:26 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	return quorumErr | 
					
						
							| 
									
										
										
										
											2016-11-21 17:47:26 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // reduceReadQuorumErrs behaves like reduceErrs but only for returning
 | 
					
						
							|  |  |  | // values of maximally occurring errors validated against readQuorum.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) { | 
					
						
							|  |  |  | 	return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum) | 
					
						
							| 
									
										
										
										
											2016-11-21 17:47:26 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // reduceWriteQuorumErrs behaves like reduceErrs but only for returning
 | 
					
						
							|  |  |  | // values of maximally occurring errors validated against writeQuorum.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) { | 
					
						
							|  |  |  | 	return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum) | 
					
						
							| 
									
										
										
										
											2016-07-10 04:01:32 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-08-15 09:09:29 +08:00
										 |  |  | // Similar to 'len(slice)' but returns the actual elements count
 | 
					
						
							| 
									
										
										
										
											2016-06-18 02:57:51 +08:00
										 |  |  | // skipping the unallocated elements.
 | 
					
						
							|  |  |  | func diskCount(disks []StorageAPI) int { | 
					
						
							|  |  |  | 	diskCount := 0 | 
					
						
							|  |  |  | 	for _, disk := range disks { | 
					
						
							|  |  |  | 		if disk == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		diskCount++ | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return diskCount | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-08-15 09:09:29 +08:00
										 |  |  | // hashOrder - hashes input key to return consistent
 | 
					
						
							| 
									
										
										
										
											2016-07-22 10:07:00 +08:00
										 |  |  | // hashed integer slice. Returned integer order is salted
 | 
					
						
							|  |  |  | // with an input key. This results in consistent order.
 | 
					
						
							|  |  |  | // NOTE: collisions are fine, we are not looking for uniqueness
 | 
					
						
							|  |  |  | // in the slices returned.
 | 
					
						
							|  |  |  | func hashOrder(key string, cardinality int) []int { | 
					
						
							| 
									
										
										
										
											2018-02-16 09:45:57 +08:00
										 |  |  | 	if cardinality <= 0 { | 
					
						
							|  |  |  | 		// Returns an empty int slice for cardinality < 0.
 | 
					
						
							| 
									
										
										
										
											2016-07-22 10:07:00 +08:00
										 |  |  | 		return nil | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-02-16 09:45:57 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-22 10:07:00 +08:00
										 |  |  | 	nums := make([]int, cardinality) | 
					
						
							|  |  |  | 	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable) | 
					
						
							| 
									
										
										
										
											2016-07-08 22:33:21 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-12-01 04:57:03 +08:00
										 |  |  | 	start := int(keyCrc % uint32(cardinality)) | 
					
						
							| 
									
										
										
										
											2016-07-22 10:07:00 +08:00
										 |  |  | 	for i := 1; i <= cardinality; i++ { | 
					
						
							|  |  |  | 		nums[i-1] = 1 + ((start + i) % cardinality) | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-07-08 22:33:21 +08:00
										 |  |  | 	return nums | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) { | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// obtain stat info.
 | 
					
						
							|  |  |  | 	stat := statInfo{} | 
					
						
							|  |  |  | 	// fetching modTime.
 | 
					
						
							|  |  |  | 	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String()) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | 		return si, err | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	stat.ModTime = modTime | 
					
						
							|  |  |  | 	// obtain Stat.Size .
 | 
					
						
							|  |  |  | 	stat.Size = gjson.GetBytes(xlMetaBuf, "stat.size").Int() | 
					
						
							|  |  |  | 	return stat, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func parseXLVersion(xlMetaBuf []byte) string { | 
					
						
							|  |  |  | 	return gjson.GetBytes(xlMetaBuf, "version").String() | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func parseXLFormat(xlMetaBuf []byte) string { | 
					
						
							|  |  |  | 	return gjson.GetBytes(xlMetaBuf, "format").String() | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-01-06 06:16:43 +08:00
										 |  |  | func parseXLParts(xlMetaBuf []byte) []ObjectPartInfo { | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// Parse the XL Parts.
 | 
					
						
							|  |  |  | 	partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array() | 
					
						
							| 
									
										
										
										
											2019-01-06 06:16:43 +08:00
										 |  |  | 	partInfo := make([]ObjectPartInfo, len(partsResult)) | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	for i, p := range partsResult { | 
					
						
							| 
									
										
										
										
											2019-01-06 06:16:43 +08:00
										 |  |  | 		info := ObjectPartInfo{} | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 		info.Number = int(p.Get("number").Int()) | 
					
						
							|  |  |  | 		info.Name = p.Get("name").String() | 
					
						
							|  |  |  | 		info.ETag = p.Get("etag").String() | 
					
						
							|  |  |  | 		info.Size = p.Get("size").Int() | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | 		info.ActualSize = p.Get("actualSize").Int() | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 		partInfo[i] = info | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return partInfo | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func parseXLMetaMap(xlMetaBuf []byte) map[string]string { | 
					
						
							|  |  |  | 	// Get xlMetaV1.Meta map.
 | 
					
						
							|  |  |  | 	metaMapResult := gjson.GetBytes(xlMetaBuf, "meta").Map() | 
					
						
							|  |  |  | 	metaMap := make(map[string]string) | 
					
						
							|  |  |  | 	for key, valResult := range metaMapResult { | 
					
						
							|  |  |  | 		metaMap[key] = valResult.String() | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return metaMap | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Constructs XLMetaV1 using `gjson` lib to retrieve each field.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, e error) { | 
					
						
							| 
									
										
										
										
											2019-04-24 05:54:28 +08:00
										 |  |  | 	var json = jsoniter.ConfigCompatibleWithStandardLibrary | 
					
						
							|  |  |  | 	e = json.Unmarshal(xlMetaBuf, &xlMeta) | 
					
						
							|  |  |  | 	return xlMeta, e | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // read xl.json from the given disk, parse and return xlV1MetaV1.Parts.
 | 
					
						
							| 
									
										
										
										
											2019-01-06 06:16:43 +08:00
										 |  |  | func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object string) ([]ObjectPartInfo, map[string]string, error) { | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// Reads entire `xl.json`.
 | 
					
						
							|  |  |  | 	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return nil, nil, err | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-03-02 03:37:57 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// obtain xlMetaV1{}.Partsusing `github.com/tidwall/gjson`.
 | 
					
						
							|  |  |  | 	xlMetaParts := parseXLParts(xlMetaBuf) | 
					
						
							| 
									
										
										
										
											2018-03-02 03:37:57 +08:00
										 |  |  | 	xlMetaMap := parseXLMetaMap(xlMetaBuf) | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-03-02 03:37:57 +08:00
										 |  |  | 	return xlMetaParts, xlMetaMap, nil | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using gjson.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) { | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// Reads entire `xl.json`.
 | 
					
						
							|  |  |  | 	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return si, nil, err | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-05-15 03:05:51 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// obtain version.
 | 
					
						
							|  |  |  | 	xlVersion := parseXLVersion(xlMetaBuf) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// obtain format.
 | 
					
						
							|  |  |  | 	xlFormat := parseXLFormat(xlMetaBuf) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Validate if the xl.json we read is sane, return corrupted format.
 | 
					
						
							| 
									
										
										
										
											2018-01-12 20:46:30 +08:00
										 |  |  | 	if !isXLMetaFormatValid(xlVersion, xlFormat) { | 
					
						
							| 
									
										
										
										
											2017-05-15 03:05:51 +08:00
										 |  |  | 		// For version mismatchs and unrecognized format, return corrupted format.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, errCorruptedFormat) | 
					
						
							|  |  |  | 		return si, nil, errCorruptedFormat | 
					
						
							| 
									
										
										
										
											2017-05-15 03:05:51 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
 | 
					
						
							|  |  |  | 	xlMetaMap := parseXLMetaMap(xlMetaBuf) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
 | 
					
						
							|  |  |  | 	xlStat, err := parseXLStat(xlMetaBuf) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return si, nil, err | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-05-15 03:05:51 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// Return structured `xl.json`.
 | 
					
						
							|  |  |  | 	return xlStat, xlMetaMap, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-26 05:51:06 +08:00
										 |  |  | // readXLMeta reads `xl.json` and returns back XL metadata structure.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) { | 
					
						
							| 
									
										
										
										
											2016-06-24 17:06:23 +08:00
										 |  |  | 	// Reads entire `xl.json`.
 | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile)) | 
					
						
							| 
									
										
										
										
											2016-06-26 05:51:06 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-10-31 07:07:57 +08:00
										 |  |  | 		if err != errFileNotFound && err != errVolumeNotFound { | 
					
						
							| 
									
										
										
										
											2018-09-14 12:42:50 +08:00
										 |  |  | 			logger.GetReqInfo(ctx).AppendTags("disk", disk.String()) | 
					
						
							| 
									
										
										
										
											2018-04-26 02:46:49 +08:00
										 |  |  | 			logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		return xlMetaV1{}, err | 
					
						
							| 
									
										
										
										
											2016-06-03 07:34:15 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-11-14 03:57:03 +08:00
										 |  |  | 	if len(xlMetaBuf) == 0 { | 
					
						
							|  |  |  | 		return xlMetaV1{}, errFileNotFound | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	// obtain xlMetaV1{} using `github.com/tidwall/gjson`.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf) | 
					
						
							| 
									
										
										
										
											2016-09-09 13:38:18 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-08-21 07:58:47 +08:00
										 |  |  | 		logger.GetReqInfo(ctx).AppendTags("disk", disk.String()) | 
					
						
							|  |  |  | 		logger.LogIf(ctx, err) | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		return xlMetaV1{}, err | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-06-24 17:06:23 +08:00
										 |  |  | 	// Return structured `xl.json`.
 | 
					
						
							|  |  |  | 	return xlMeta, nil | 
					
						
							| 
									
										
										
										
											2016-05-26 07:42:31 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2016-07-13 09:23:40 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-27 02:34:48 +08:00
										 |  |  | // Reads all `xl.json` metadata as a xlMetaV1 slice.
 | 
					
						
							|  |  |  | // Returns error slice indicating the failed metadata reads.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) { | 
					
						
							| 
									
										
										
										
											2016-07-27 02:34:48 +08:00
										 |  |  | 	errs := make([]error, len(disks)) | 
					
						
							|  |  |  | 	metadataArray := make([]xlMetaV1, len(disks)) | 
					
						
							|  |  |  | 	var wg = &sync.WaitGroup{} | 
					
						
							|  |  |  | 	// Read `xl.json` parallelly across disks.
 | 
					
						
							|  |  |  | 	for index, disk := range disks { | 
					
						
							|  |  |  | 		if disk == nil { | 
					
						
							|  |  |  | 			errs[index] = errDiskNotFound | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		wg.Add(1) | 
					
						
							|  |  |  | 		// Read `xl.json` in routine.
 | 
					
						
							|  |  |  | 		go func(index int, disk StorageAPI) { | 
					
						
							|  |  |  | 			defer wg.Done() | 
					
						
							|  |  |  | 			var err error | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 			metadataArray[index], err = readXLMeta(ctx, disk, bucket, object) | 
					
						
							| 
									
										
										
										
											2016-07-27 02:34:48 +08:00
										 |  |  | 			if err != nil { | 
					
						
							|  |  |  | 				errs[index] = err | 
					
						
							|  |  |  | 				return | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		}(index, disk) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Wait for all the routines to finish.
 | 
					
						
							|  |  |  | 	wg.Wait() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Return all the metadata.
 | 
					
						
							|  |  |  | 	return metadataArray, errs | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | // Return shuffled partsMetadata depending on distribution.
 | 
					
						
							|  |  |  | func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) { | 
					
						
							|  |  |  | 	if distribution == nil { | 
					
						
							|  |  |  | 		return partsMetadata | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata)) | 
					
						
							|  |  |  | 	// Shuffle slice xl metadata for expected distribution.
 | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | 	for index := range partsMetadata { | 
					
						
							|  |  |  | 		blockIndex := distribution[index] | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | 		shuffledPartsMetadata[blockIndex-1] = partsMetadata[index] | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | 	return shuffledPartsMetadata | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | // shuffleDisks - shuffle input disks slice depending on the
 | 
					
						
							| 
									
										
										
										
											2017-08-15 09:09:29 +08:00
										 |  |  | // erasure distribution. Return shuffled slice of disks with
 | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | // their expected distribution.
 | 
					
						
							|  |  |  | func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) { | 
					
						
							|  |  |  | 	if distribution == nil { | 
					
						
							|  |  |  | 		return disks | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	shuffledDisks = make([]StorageAPI, len(disks)) | 
					
						
							|  |  |  | 	// Shuffle disks for expected distribution.
 | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | 	for index := range disks { | 
					
						
							|  |  |  | 		blockIndex := distribution[index] | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | 		shuffledDisks[blockIndex-1] = disks[index] | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-02-25 01:20:40 +08:00
										 |  |  | 	return shuffledDisks | 
					
						
							| 
									
										
										
										
											2016-07-16 23:35:30 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-06-15 08:14:27 +08:00
										 |  |  | // evalDisks - returns a new slice of disks where nil is set if
 | 
					
						
							| 
									
										
										
										
											2017-08-15 09:09:29 +08:00
										 |  |  | // the corresponding error in errs slice is not nil
 | 
					
						
							| 
									
										
										
										
											2017-06-15 08:14:27 +08:00
										 |  |  | func evalDisks(disks []StorageAPI, errs []error) []StorageAPI { | 
					
						
							|  |  |  | 	if len(errs) != len(disks) { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(context.Background(), errors.New("unexpected disks/errors slice length")) | 
					
						
							| 
									
										
										
										
											2017-06-15 08:14:27 +08:00
										 |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	newDisks := make([]StorageAPI, len(disks)) | 
					
						
							|  |  |  | 	for index := range errs { | 
					
						
							|  |  |  | 		if errs[index] == nil { | 
					
						
							|  |  |  | 			newDisks[index] = disks[index] | 
					
						
							|  |  |  | 		} else { | 
					
						
							|  |  |  | 			newDisks[index] = nil | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return newDisks | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-10-07 00:38:01 +08:00
// Errors specifically generated by calculatePartSizeFromIdx function.
var (
	// errPartSizeZero - returned when the supplied part size is zero.
	errPartSizeZero  = errors.New("Part size cannot be zero")
	// errPartSizeIndex - returned when the supplied part index is smaller than 1.
	errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-10-07 00:38:01 +08:00
										 |  |  | // calculatePartSizeFromIdx calculates the part size according to input index.
 | 
					
						
							|  |  |  | // returns error if totalSize is -1, partSize is 0, partIndex is 0.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | func calculatePartSizeFromIdx(ctx context.Context, totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) { | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | 	if totalSize < -1 { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, errInvalidArgument) | 
					
						
							|  |  |  | 		return 0, errInvalidArgument | 
					
						
							| 
									
										
										
										
											2017-10-07 00:38:01 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | 	if partSize == 0 { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, errPartSizeZero) | 
					
						
							|  |  |  | 		return 0, errPartSizeZero | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	if partIndex < 1 { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, errPartSizeIndex) | 
					
						
							|  |  |  | 		return 0, errPartSizeIndex | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-10-07 00:38:01 +08:00
										 |  |  | 	if totalSize > 0 { | 
					
						
							|  |  |  | 		// Compute the total count of parts
 | 
					
						
							|  |  |  | 		partsCount := totalSize/partSize + 1 | 
					
						
							|  |  |  | 		// Return the part's size
 | 
					
						
							|  |  |  | 		switch { | 
					
						
							|  |  |  | 		case int64(partIndex) < partsCount: | 
					
						
							|  |  |  | 			currPartSize = partSize | 
					
						
							|  |  |  | 		case int64(partIndex) == partsCount: | 
					
						
							|  |  |  | 			// Size of last part
 | 
					
						
							|  |  |  | 			currPartSize = totalSize % partSize | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 			currPartSize = 0 | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-10-07 00:38:01 +08:00
										 |  |  | 	return currPartSize, nil | 
					
						
							| 
									
										
										
										
											2017-01-31 07:44:42 +08:00
										 |  |  | } |