/*
 * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"errors"
	"hash/crc32"
	"path"

	jsoniter "github.com/json-iterator/go"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// Returns the number of errors that occurred most frequently (incl. nil) and the
// corresponding error value. NB: when more than one error value occurs the
// maximum number of times, the error value returned depends on how
// golang's map orders keys. This doesn't affect correctness as long as the quorum
// value is greater than or equal to a simple majority, since none of the equally
// maximal values would occur quorum or more times.
func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
	errorCounts := make(map[error]int)
	for _, err := range errs {
		if IsErrIgnored(err, ignoredErrs...) {
			continue
		}
		errorCounts[err]++
	}

	max := 0
	for err, count := range errorCounts {
		switch {
		case max < count:
			max = count
			maxErr = err

		// Prefer `nil` over other error values with the same
		// number of occurrences.
		case max == count && err == nil:
			maxErr = err
		}
	}
	return max, maxErr
}
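
// Editorial sketch (not part of the original source): illustrates how
// reduceErrs picks the most frequent outcome. With three nil errors and a
// single errDiskNotFound, nil wins with a count of 3, so a caller with a
// quorum of 3 would treat the operation as successful.
func exampleReduceErrsUsage() (int, error) {
	errs := []error{nil, nil, errDiskNotFound, nil}
	// No errors are ignored in this sketch.
	count, err := reduceErrs(errs, nil)
	// count == 3 and err == nil, since nil occurs most often.
	return count, err
}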
					
						
// reduceQuorumErrs behaves like reduceErrs but only returns the
// value of the maximally occurring error validated against a generic
// quorum number that can be read or write quorum depending on usage.
func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, quorum int, quorumErr error) error {
	maxCount, maxErr := reduceErrs(errs, ignoredErrs)
	if maxCount >= quorum {
		return maxErr
	}
	return quorumErr
}

// reduceReadQuorumErrs behaves like reduceErrs but only returns the
// value of the maximally occurring error validated against readQuorum.
func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) {
	return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum)
}

// reduceWriteQuorumErrs behaves like reduceErrs but only returns the
// value of the maximally occurring error validated against writeQuorum.
func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) {
	return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum)
}
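
// Editorial sketch (not part of the original source): shows how per-disk
// errors from a fan-out write are reduced against a write quorum. With three
// successes out of four disks and writeQuorum = 3, the reduced error is nil;
// with only two successes it would be errXLWriteQuorum.
func exampleWriteQuorumReduction(ctx context.Context) error {
	errs := []error{nil, nil, nil, errDiskNotFound}
	// No ignorable errors are passed in this sketch.
	return reduceWriteQuorumErrs(ctx, errs, nil, 3)
}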
					
						
// Similar to 'len(slice)' but returns the actual count of elements,
// skipping the nil (unallocated) entries.
func diskCount(disks []StorageAPI) int {
	diskCount := 0
	for _, disk := range disks {
		if disk == nil {
			continue
		}
		diskCount++
	}
	return diskCount
}

// hashOrder - hashes the input key to return a consistent
// hashed integer slice. The returned integer order is salted
// with the input key, so the same key always yields the same order.
// NOTE: collisions are fine, we are not looking for uniqueness
// in the slices returned.
func hashOrder(key string, cardinality int) []int {
	if cardinality <= 0 {
		// Return nil for cardinality <= 0.
		return nil
	}

	nums := make([]int, cardinality)
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)

	start := int(keyCrc % uint32(cardinality))
	for i := 1; i <= cardinality; i++ {
		nums[i-1] = 1 + ((start + i) % cardinality)
	}
	return nums
}
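
// Editorial sketch (not part of the original source): hashOrder always
// returns the same 1-based rotation of [1..cardinality] for the same key, so
// every caller that hashes the same object name walks the disks in the same
// order.
func exampleHashOrderUsage() []int {
	// For cardinality 4 this is a rotation of [1 2 3 4] whose starting
	// offset is derived from the CRC32 checksum of the key.
	return hashOrder("mybucket/myobject", 4)
}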
					
						
// Constructs xlMetaV1 using the `jsoniter` lib.
func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, err error) {
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	err = json.Unmarshal(xlMetaBuf, &xlMeta)
	return xlMeta, err
}

// read xl.json from the given disk, parse and return xlMetaV1.Parts.
func readXLMetaParts(ctx context.Context, disk StorageAPI, bucket string, object string) ([]ObjectPartInfo, map[string]string, error) {
	// Reads entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return nil, nil, err
	}

	var xlMeta xlMetaV1
	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
	if err != nil {
		return nil, nil, err
	}

	return xlMeta.Parts, xlMeta.Meta, nil
}

// read xl.json from the given disk and parse xlMetaV1.Stat and xlMetaV1.Meta using jsoniter.
func readXLMetaStat(ctx context.Context, disk StorageAPI, bucket string, object string) (si statInfo,
	mp map[string]string, e error) {
	// Reads entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return si, nil, err
	}

	var xlMeta xlMetaV1
	xlMeta, err = xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
	if err != nil {
		return si, mp, err
	}

	// Return structured `xl.json`.
	return xlMeta.Stat, xlMeta.Meta, nil
}

// readXLMeta reads `xl.json` and returns back XL metadata structure.
func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {
	// Reads entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		if err != errFileNotFound && err != errVolumeNotFound {
			logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
			logger.LogIf(ctx, err)
		}
		return xlMetaV1{}, err
	}
	if len(xlMetaBuf) == 0 {
		return xlMetaV1{}, errFileNotFound
	}
	return xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
}

// Reads all `xl.json` metadata as a xlMetaV1 slice.
// Returns an error slice indicating the failed metadata reads.
func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) {
	metadataArray := make([]xlMetaV1, len(disks))

	g := errgroup.WithNErrs(len(disks))
	// Read `xl.json` in parallel across disks.
	for index := range disks {
		index := index
		g.Go(func() (err error) {
			if disks[index] == nil {
				return errDiskNotFound
			}
			metadataArray[index], err = readXLMeta(ctx, disks[index], bucket, object)
			return err
		}, index)
	}

	// Return all the metadata.
	return metadataArray, g.Wait()
}
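
// Editorial sketch (not part of the original source): a typical caller reads
// every copy of xl.json and then validates the per-disk errors against the
// read quorum before trusting the metadata.
func exampleReadAllWithQuorum(ctx context.Context, disks []StorageAPI, bucket, object string, readQuorum int) ([]xlMetaV1, error) {
	metaArr, errs := readAllXLMetadata(ctx, disks, bucket, object)
	// Real callers typically pass a list of ignorable errors; nil keeps the
	// sketch self-contained.
	if reducedErr := reduceReadQuorumErrs(ctx, errs, nil, readQuorum); reducedErr != nil {
		return nil, reducedErr
	}
	return metaArr, nil
}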
					
						
// Return shuffled partsMetadata depending on the distribution.
func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
	if distribution == nil {
		return partsMetadata
	}
	shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
	// Shuffle the xl metadata slice for the expected distribution.
	for index := range partsMetadata {
		blockIndex := distribution[index]
		shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
	}
	return shuffledPartsMetadata
}

// shuffleDisks - shuffle input disks slice depending on the
// erasure distribution. Return shuffled slice of disks with
// their expected distribution.
func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) {
	if distribution == nil {
		return disks
	}
	shuffledDisks = make([]StorageAPI, len(disks))
	// Shuffle disks for the expected distribution.
	for index := range disks {
		blockIndex := distribution[index]
		shuffledDisks[blockIndex-1] = disks[index]
	}
	return shuffledDisks
}
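
// Editorial sketch (not part of the original source): ties hashOrder and
// shuffleDisks together. The distribution computed from the object key
// decides which disk holds which erasure-coded block.
func exampleShuffleDisksUsage(disks []StorageAPI, object string) []StorageAPI {
	distribution := hashOrder(object, len(disks))
	// disks[i] ends up at position distribution[i]-1 in the result.
	return shuffleDisks(disks, distribution)
}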
					
						
// evalDisks - returns a new slice of disks where nil is set if
// the corresponding error in the errs slice is not nil.
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
	if len(errs) != len(disks) {
		logger.LogIf(context.Background(), errors.New("unexpected disks/errors slice length"))
		return nil
	}
	newDisks := make([]StorageAPI, len(disks))
	for index := range errs {
		if errs[index] == nil {
			newDisks[index] = disks[index]
		} else {
			newDisks[index] = nil
		}
	}
	return newDisks
}
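
// Editorial sketch (not part of the original source): after a fan-out
// operation, evalDisks masks out the disks whose call failed so that only
// the successful ones participate in the next phase.
func exampleEvalDisksUsage(disks []StorageAPI) []StorageAPI {
	// Pretend the last disk failed its operation.
	errs := make([]error, len(disks))
	if len(errs) > 0 {
		errs[len(errs)-1] = errDiskNotFound
	}
	// The returned slice has nil in the failed slot.
	return evalDisks(disks, errs)
}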
					
						
// Errors specifically generated by the calculatePartSizeFromIdx function.
var (
	errPartSizeZero  = errors.New("Part size cannot be zero")
	errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
)

// calculatePartSizeFromIdx calculates the part size according to the input index.
// Returns an error if totalSize is less than -1, partSize is 0, or partIndex is
// less than 1.
func calculatePartSizeFromIdx(ctx context.Context, totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
	if totalSize < -1 {
		logger.LogIf(ctx, errInvalidArgument)
		return 0, errInvalidArgument
	}
	if partSize == 0 {
		logger.LogIf(ctx, errPartSizeZero)
		return 0, errPartSizeZero
	}
	if partIndex < 1 {
		logger.LogIf(ctx, errPartSizeIndex)
		return 0, errPartSizeIndex
	}
	if totalSize == -1 {
		return -1, nil
	}
	if totalSize > 0 {
		// Compute the total count of parts.
		partsCount := totalSize/partSize + 1
		// Return the part's size.
		switch {
		case int64(partIndex) < partsCount:
			currPartSize = partSize
		case int64(partIndex) == partsCount:
			// Size of the last part.
			currPartSize = totalSize % partSize
		default:
			currPartSize = 0
		}
	}
	return currPartSize, nil
}
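
// Editorial sketch (not part of the original source): a worked example for
// calculatePartSizeFromIdx. For a 13 MiB object uploaded in 5 MiB parts,
// partsCount is 13/5+1 = 3; parts 1 and 2 are 5 MiB each, part 3 is the
// 3 MiB remainder, and any higher index yields 0.
func exampleCalculatePartSize(ctx context.Context) (int64, error) {
	const mib = int64(1) << 20
	// Returns 3 MiB for the last (third) part of a 13 MiB object.
	return calculatePartSizeFromIdx(ctx, 13*mib, 5*mib, 3)
}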