// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/minio/madmin-go/v3"
)

// getLatestFileInfo returns the latest updated FileInfo and an error in case of failure.
func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, defaultParityCount int, errs []error) (FileInfo, error) {
	// There should be at least half the entries correct; if not, return failure.
	expectedRQuorum := len(partsMetadata) / 2
	if defaultParityCount == 0 {
		// if parity count is '0', we expect all entries to be present.
		expectedRQuorum = len(partsMetadata)
	}
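	// For example, with the 16 parts metadata entries used by the tests in
	// this file and a non-zero parity count, expectedRQuorum works out to
	// 16/2 = 8; with a parity count of 0, all 16 entries must be readable.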
					
						
	reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, expectedRQuorum)
	if reducedErr != nil {
		return FileInfo{}, reducedErr
	}

	// List all the file commit ids from parts metadata.
	modTimes := listObjectModtimes(partsMetadata, errs)

	// Count all latest updated FileInfo values
	var count int
	var latestFileInfo FileInfo

	// Reduce the list of modTimes to a single common value, i.e. the last updated time.
	modTime := commonTime(modTimes, expectedRQuorum)

	if modTime.IsZero() || modTime.Equal(timeSentinel) {
		return FileInfo{}, errErasureReadQuorum
	}
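	// Note: commonTime returns the modTime shared by at least expectedRQuorum
	// entries; when no value reaches that quorum it returns timeSentinel,
	// which is rejected above (see TestCommonTime case 3 below).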
					
						
	// Iterate through all the modTimes and count the FileInfo(s) with latest time.
	for index, t := range modTimes {
		if partsMetadata[index].IsValid() && t.Equal(modTime) {
			latestFileInfo = partsMetadata[index]
			count++
		}
	}

	if !latestFileInfo.IsValid() {
		return FileInfo{}, errErasureReadQuorum
	}

	if count < latestFileInfo.Erasure.DataBlocks {
		return FileInfo{}, errErasureReadQuorum
	}

	return latestFileInfo, nil
}
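// The tests below exercise the healing helpers in the order they are used:
// readAllFileInfo fetches every drive's FileInfo, getLatestFileInfo picks the
// quorum-consistent copy, listOnlineDisks selects the drives agreeing on that
// modTime, and disksWithAllParts filters out drives with missing or corrupted
// data.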
					
						

// validates functionality provided to find most common
// time occurrence from a list of times.
func TestCommonTime(t *testing.T) {
	// List of test cases for common modTime.
	testCases := []struct {
		times  []time.Time
		time   time.Time
		quorum int
	}{
		{
			// 1. Tests common times when slice has varying time elements.
			[]time.Time{
				time.Unix(0, 1).UTC(),
				time.Unix(0, 2).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 2).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 1).UTC(),
			},
			time.Unix(0, 3).UTC(),
			3,
		},
		{
			// 2. Tests common time obtained when all elements are equal.
			[]time.Time{
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
			},
			time.Unix(0, 3).UTC(),
			4,
		},
		{
			// 3. Tests common time obtained when elements have a mixture of
			// sentinel values and don't have read quorum on any of the values.
			[]time.Time{
				time.Unix(0, 3).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 2).UTC(),
				time.Unix(0, 1).UTC(),
				time.Unix(0, 3).UTC(),
				time.Unix(0, 4).UTC(),
				time.Unix(0, 3).UTC(),
				timeSentinel,
				timeSentinel,
				timeSentinel,
			},
			timeSentinel,
			5,
		},
	}

	// Tests all the testcases, and validates them against the expected
	// common modtime. Tests fail if the modtime does not match.
	for i, testCase := range testCases {
		// Obtain a common mod time from the modTimes slice.
		ctime := commonTime(testCase.times, testCase.quorum)
		if !testCase.time.Equal(ctime) {
			t.Errorf("Test case %d, expected to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime)
		}
	}
}

// TestListOnlineDisks - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisks(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	obj, disks, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatalf("Prepare Erasure backend failed - %v", err)
	}
	setObjectLayer(obj)
	defer obj.Shutdown(context.Background())
	defer removeRoots(disks)

	type tamperKind int
	const (
		noTamper tamperKind = iota
		deletePart
		corruptPart
	)

	timeSentinel := time.Unix(1, 0).UTC()
	threeNanoSecs := time.Unix(3, 0).UTC()
	fourNanoSecs := time.Unix(4, 0).UTC()
	modTimesThreeNone := make([]time.Time, 16)
	modTimesThreeFour := make([]time.Time, 16)
	for i := 0; i < 16; i++ {
		// Have 13 good xl.meta: 12 for the default parity count = 4 (EC:4)
		// and one to be tampered with.
		if i > 12 {
			modTimesThreeFour[i] = fourNanoSecs
			modTimesThreeNone[i] = timeSentinel
			continue
		}
		modTimesThreeFour[i] = threeNanoSecs
		modTimesThreeNone[i] = threeNanoSecs
	}
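	// Indices 13-15 carry fourNanoSecs (or timeSentinel) while indices 0-12
	// stay at threeNanoSecs, so threeNanoSecs is the modTime expected to
	// reach read quorum in the test cases below.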
					
						
	testCases := []struct {
		modTimes       []time.Time
		expectedTime   time.Time
		errs           []error
		_tamperBackend tamperKind
	}{
		{
			modTimes:     modTimesThreeFour,
			expectedTime: threeNanoSecs,
			errs: []error{
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil, nil, nil, nil,
			},
			_tamperBackend: noTamper,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil,
				// Some disks can't access xl.meta.
				errFileNotFound, errDiskAccessDenied, errDiskNotFound,
			},
			_tamperBackend: deletePart,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil,
				// Some disks don't have xl.meta.
				errDiskNotFound, errFileNotFound, errFileNotFound,
			},
			_tamperBackend: corruptPart,
		},
	}

	bucket := "bucket"
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	object := "object"
	data := bytes.Repeat([]byte("a"), smallFileThreshold*16)
	z := obj.(*erasureServerPools)

	erasureDisks, err := z.GetDisks(0, 0)
	if err != nil {
		t.Fatal(err)
	}

	for i, test := range testCases {
		test := test
		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
			_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
			if err != nil {
				t.Fatalf("Failed to putObject %v", err)
			}

			partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
			fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
			if err != nil {
				t.Fatalf("Failed to getLatestFileInfo %v", err)
			}

			for j := range partsMetadata {
				if errs[j] != nil {
					t.Fatalf("expected error to be nil: %s", errs[j])
				}
				partsMetadata[j].ModTime = test.modTimes[j]
			}

			tamperedIndex := -1
			switch test._tamperBackend {
			case deletePart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Remove a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{
						Recursive: false,
						Immediate: false,
					})
					if dErr != nil {
						t.Fatalf("Failed to delete %s - %v", filepath.Join(object, "part.1"), dErr)
					}
					break
				}
			case corruptPart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Corrupt a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1")
					f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
					if err != nil {
						t.Fatalf("Failed to open %s: %s\n", filePath, err)
					}
					f.WriteString("oops") // Will cause bitrot error
					f.Close()
					break
				}

			}

			rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount
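			// With the 16 drives prepared above and the default parity of 4
			// (EC:4) mentioned earlier, rQuorum works out to 16 - 4 = 12.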
					
						
			onlineDisks, modTime, _ := listOnlineDisks(erasureDisks, partsMetadata, test.errs, rQuorum)
			if !modTime.Equal(test.expectedTime) {
				t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
					test.expectedTime, modTime)
			}
			availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
				test.errs, fi, bucket, object, madmin.HealDeepScan)
			test.errs = newErrs

			if test._tamperBackend != noTamper {
				if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
					t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data",
						erasureDisks[tamperedIndex])
				}
			}
		})
	}
}

// TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks
// are consistent with each other.
func TestListOnlineDisksSmallObjects(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	obj, disks, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatalf("Prepare Erasure backend failed - %v", err)
	}
	setObjectLayer(obj)
	defer obj.Shutdown(context.Background())
	defer removeRoots(disks)

	type tamperKind int
	const (
		noTamper    tamperKind = iota
		deletePart  tamperKind = iota
		corruptPart tamperKind = iota
	)
	timeSentinel := time.Unix(1, 0).UTC()
	threeNanoSecs := time.Unix(3, 0).UTC()
	fourNanoSecs := time.Unix(4, 0).UTC()
	modTimesThreeNone := make([]time.Time, 16)
	modTimesThreeFour := make([]time.Time, 16)
	for i := 0; i < 16; i++ {
		// Have 13 good xl.meta: 12 for the default parity count = 4 (EC:4)
		// and one to be tampered with.
		if i > 12 {
			modTimesThreeFour[i] = fourNanoSecs
			modTimesThreeNone[i] = timeSentinel
			continue
		}
		modTimesThreeFour[i] = threeNanoSecs
		modTimesThreeNone[i] = threeNanoSecs
	}

	testCases := []struct {
		modTimes       []time.Time
		expectedTime   time.Time
		errs           []error
		_tamperBackend tamperKind
	}{
		{
			modTimes:     modTimesThreeFour,
			expectedTime: threeNanoSecs,
			errs: []error{
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil, nil, nil, nil,
			},
			_tamperBackend: noTamper,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil,
				// Some disks can't access xl.meta.
				errFileNotFound, errDiskAccessDenied, errDiskNotFound,
			},
			_tamperBackend: deletePart,
		},
		{
			modTimes:     modTimesThreeNone,
			expectedTime: threeNanoSecs,
			errs: []error{
				// Disks that have a valid xl.meta.
				nil, nil, nil, nil, nil, nil, nil, nil,
				nil, nil, nil, nil, nil,
				// Some disks don't have xl.meta.
				errDiskNotFound, errFileNotFound, errFileNotFound,
			},
			_tamperBackend: corruptPart,
		},
	}

	bucket := "bucket"
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	object := "object"
	data := bytes.Repeat([]byte("a"), smallFileThreshold/2)
	z := obj.(*erasureServerPools)

	erasureDisks, err := z.GetDisks(0, 0)
	if err != nil {
		t.Fatal(err)
	}

	for i, test := range testCases {
		test := test
		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
			_, err := obj.PutObject(ctx, bucket, object,
				mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
			if err != nil {
				t.Fatalf("Failed to putObject %v", err)
			}

			partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", true, true)
			fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
			if err != nil {
				t.Fatalf("Failed to getLatestFileInfo %v", err)
			}

			for j := range partsMetadata {
				if errs[j] != nil {
					t.Fatalf("expected error to be nil: %s", errs[j])
				}
				partsMetadata[j].ModTime = test.modTimes[j]
			}

			if erasureDisks, err = writeUniqueFileInfo(ctx, erasureDisks, "", bucket, object, partsMetadata, diskCount(erasureDisks)); err != nil {
				t.Fatal(ctx, err)
			}
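			// Note: this test writes an object smaller than smallFileThreshold,
			// whose data is kept inline in xl.meta, so the rewritten metadata is
			// persisted with writeUniqueFileInfo above and the tampering below
			// targets xl.meta itself rather than part.1.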
					
						
			tamperedIndex := -1
			switch test._tamperBackend {
			case deletePart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Remove a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{
						Recursive: false,
						Immediate: false,
					})
					if dErr != nil {
						t.Fatalf("Failed to delete %s - %v", pathJoin(object, xlStorageFormatFile), dErr)
					}
					break
				}
			case corruptPart:
				for index, err := range test.errs {
					if err != nil {
						continue
					}
					// Corrupt a part from a disk
					// which has a valid xl.meta,
					// and check if that disk
					// appears in outDatedDisks.
					tamperedIndex = index
					filePath := pathJoin(erasureDisks[index].String(), bucket, object, xlStorageFormatFile)
					f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
					if err != nil {
						t.Fatalf("Failed to open %s: %s\n", filePath, err)
					}
					f.WriteString("oops") // Will cause bitrot error
					f.Close()
					break
				}

			}

			rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount
			onlineDisks, modTime, _ := listOnlineDisks(erasureDisks, partsMetadata, test.errs, rQuorum)
			if !modTime.Equal(test.expectedTime) {
				t.Fatalf("Expected modTime to be equal to %v but was found to be %v",
					test.expectedTime, modTime)
			}

			availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata,
				test.errs, fi, bucket, object, madmin.HealDeepScan)
			test.errs = newErrs

			if test._tamperBackend != noTamper {
				if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
					t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data",
						erasureDisks[tamperedIndex])
				}
			}
		})
	}
}

func TestDisksWithAllParts(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	obj, disks, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatalf("Prepare Erasure backend failed - %v", err)
	}
	setObjectLayer(obj)
	defer obj.Shutdown(context.Background())
	defer removeRoots(disks)

	bucket := "bucket"
	object := "object"
	// make data with more than one part
	partCount := 3
	data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
	z := obj.(*erasureServerPools)
	s := z.serverPools[0].sets[0]
	erasureDisks := s.getDisks()
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}

	_, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
	readQuorum := len(erasureDisks) / 2
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		t.Fatalf("Failed to read xl meta data %v", reducedErr)
	}

	// Test 1: Test that all disks are returned without any failures with
	// unmodified metadata
	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
	if err != nil {
		t.Fatalf("Failed to read xl meta data %v", err)
	}

	fi, err := getLatestFileInfo(ctx, partsMetadata, s.defaultParityCount, errs)
	if err != nil {
		t.Fatalf("Failed to get quorum consistent fileInfo %v", err)
	}

	erasureDisks, _, _ = listOnlineDisks(erasureDisks, partsMetadata, errs, readQuorum)

	filteredDisks, errs, _ := disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}

	for diskIndex, disk := range filteredDisks {
		if errs[diskIndex] != nil {
			t.Errorf("Unexpected error %s", errs[diskIndex])
		}

		if disk == nil {
			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
		}
	}
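	// disksWithAllParts is expected to return a slice of the same length as
	// erasureDisks, with nil in the slot of any drive whose metadata or part
	// data is inconsistent; the checks below rely on that shape.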
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-20 03:04:08 +08:00
										 |  |  | 	// Test 2: Not synchronized modtime
 | 
					
						
							|  |  |  | 	partsMetadataBackup := partsMetadata[0] | 
					
						
							|  |  |  | 	partsMetadata[0].ModTime = partsMetadata[0].ModTime.Add(-1 * time.Hour) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	errs = make([]error, len(erasureDisks)) | 
					
						
							| 
									
										
										
										
											2021-12-24 15:01:46 +08:00
										 |  |  | 	filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata, | 
					
						
							|  |  |  | 		errs, fi, bucket, object, madmin.HealDeepScan) | 
					
						
							| 
									
										
										
										
											2021-05-20 03:04:08 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if len(filteredDisks) != len(erasureDisks) { | 
					
						
							| 
									
										
										
										
											2022-08-05 07:10:08 +08:00
										 |  |  | 		t.Errorf("Unexpected number of drives: %d", len(filteredDisks)) | 
					
						
							| 
									
										
										
										
											2021-05-20 03:04:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	for diskIndex, disk := range filteredDisks { | 
					
						
							|  |  |  | 		if diskIndex == 0 && disk != nil { | 
					
						
							| 
									
										
										
										
											2022-08-05 07:10:08 +08:00
										 |  |  | 			t.Errorf("Drive not filtered as expected, drive: %d", diskIndex) | 
					
						
							| 
									
										
										
										
											2021-05-20 03:04:08 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		if diskIndex != 0 && disk == nil { | 
					
						
							| 
									
										
										
										
											2022-08-05 07:10:08 +08:00
										 |  |  | 			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex) | 
					
						
							| 
									
										
										
										
											2021-05-20 03:04:08 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	partsMetadata[0] = partsMetadataBackup // Revert before going to the next test
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Test 3: Not synchronized DataDir
	partsMetadataBackup = partsMetadata[1]
	partsMetadata[1].DataDir = "foo-random"

	errs = make([]error, len(erasureDisks))
	filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}
	for diskIndex, disk := range filteredDisks {
		if diskIndex == 1 && disk != nil {
			t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
		}
		if diskIndex != 1 && disk == nil {
			t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
		}
	}
	partsMetadata[1] = partsMetadataBackup // Revert before going to the next test

	// Test 4: key = disk index, value = part name with hash mismatch
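	// The first bytes of part.1 are overwritten on drives 0, 3 and 15 below;
	// the deep scan should flag exactly those drives with a bitrot error and
	// leave the remaining drives unfiltered.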
	diskFailures := make(map[int]string)
	diskFailures[0] = "part.1"
	diskFailures[3] = "part.1"
	diskFailures[15] = "part.1"

	for diskIndex, partName := range diskFailures {
		for i := range partsMetadata[diskIndex].Parts {
			if fmt.Sprintf("part.%d", i+1) == partName {
				filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName)
				f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
				if err != nil {
					t.Fatalf("Failed to open %s: %s\n", filePath, err)
				}
				f.WriteString("oops") // Will cause bitrot error
				f.Close()
			}
		}
	}

	errs = make([]error, len(erasureDisks))
	filteredDisks, errs, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata,
		errs, fi, bucket, object, madmin.HealDeepScan)

	if len(filteredDisks) != len(erasureDisks) {
		t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
	}

	for diskIndex, disk := range filteredDisks {
		if _, ok := diskFailures[diskIndex]; ok {
			if disk != nil {
				t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
			}
			if errs[diskIndex] == nil {
				t.Errorf("Expected error not received, driveIndex: %d", diskIndex)
			}
		} else {
			if disk == nil {
				t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
			}
			if errs[diskIndex] != nil {
				t.Errorf("Unexpected error, %s, driveIndex: %d", errs[diskIndex], diskIndex)
			}
		}
	}
}

func TestCommonParities(t *testing.T) {
	// This test uses two FileInfo values that represent the same object but
	// have different parities. They occur on an equal number of drives, but only
	// one has read quorum. commonParity should pick the parity corresponding to
	// the FileInfo which has read quorum.
	fi1 := FileInfo{
		Volume:         "mybucket",
		Name:           "myobject",
		VersionID:      "",
		IsLatest:       true,
		Deleted:        false,
		ExpireRestored: false,
		DataDir:        "4a01d9dd-0c5e-4103-88f8-b307c57d212e",
		XLV1:           false,
		ModTime:        time.Date(2023, time.March, 15, 11, 18, 4, 989906961, time.UTC),
		Size:           329289, Mode: 0x0, WrittenByVersion: 0x63c77756,
		Metadata: map[string]string{
			"content-type": "application/octet-stream", "etag": "f205307ef9f50594c4b86d9c246bee86", "x-minio-internal-erasure-upgraded": "5->6", "x-minio-internal-inline-data": "true",
		},
		Parts: []ObjectPartInfo{
			{
				ETag:       "",
				Number:     1,
				Size:       329289,
				ActualSize: 329289,
				ModTime:    time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
				Index:      []uint8(nil),
				Checksums:  map[string]string(nil),
			},
		},
		Erasure: ErasureInfo{
			Algorithm:    "ReedSolomon",
			DataBlocks:   6,
			ParityBlocks: 6,
			BlockSize:    1048576,
			Index:        1,
			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
			Checksums:    []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}},
		},
		NumVersions: 1,
		Idx:         0,
	}

	fi2 := FileInfo{
		Volume:           "mybucket",
		Name:             "myobject",
		VersionID:        "",
		IsLatest:         true,
		Deleted:          false,
		DataDir:          "6f5c106d-9d28-4c85-a7f4-eac56225876b",
		ModTime:          time.Date(2023, time.March, 15, 19, 57, 30, 492530160, time.UTC),
		Size:             329289,
		Mode:             0x0,
		WrittenByVersion: 0x63c77756,
		Metadata:         map[string]string{"content-type": "application/octet-stream", "etag": "f205307ef9f50594c4b86d9c246bee86", "x-minio-internal-inline-data": "true"},
		Parts: []ObjectPartInfo{
			{
				ETag:       "",
				Number:     1,
				Size:       329289,
				ActualSize: 329289,
				ModTime:    time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),
				Index:      []uint8(nil),
				Checksums:  map[string]string(nil),
			},
		},
		Erasure: ErasureInfo{
			Algorithm:    "ReedSolomon",
			DataBlocks:   7,
			ParityBlocks: 5,
			BlockSize:    1048576,
			Index:        2,
			Distribution: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
			Checksums: []ChecksumInfo{
				{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}},
			},
		},
		NumVersions: 1,
		Idx:         0,
	}

	fiDel := FileInfo{
		Volume:           "mybucket",
		Name:             "myobject",
		VersionID:        "",
		IsLatest:         true,
		Deleted:          true,
		ModTime:          time.Date(2023, time.March, 15, 19, 57, 30, 492530160, time.UTC),
		Mode:             0x0,
		WrittenByVersion: 0x63c77756,
		NumVersions:      1,
		Idx:              0,
	}

	tests := []struct {
		fi1, fi2 FileInfo
	}{
		{
			fi1: fi1,
			fi2: fi2,
		},
		{
			fi1: fi1,
			fi2: fiDel,
		},
	}
	for idx, test := range tests {
		var metaArr []FileInfo
		for i := 0; i < 12; i++ {
			fi := test.fi1
			if i%2 == 0 {
				fi = test.fi2
			}
			metaArr = append(metaArr, fi)
		}
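		// The alternating assignment above puts test.fi2 on even drive indices
		// and test.fi1 on odd ones, so each FileInfo lands on 6 of the 12 drives.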

		parities := listObjectParities(metaArr, make([]error, len(metaArr)))
		parity := commonParity(parities, 5)
		var match int
		for _, fi := range metaArr {
			if fi.Erasure.ParityBlocks == parity {
				match++
			}
		}
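		// len(metaArr)-parity is the data-block count for the chosen parity,
		// i.e. the read quorum, so at least that many drives must report it.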
		if match < len(metaArr)-parity {
			t.Fatalf("Test %d: Expected %d drives with parity=%d, but got %d", idx, len(metaArr)-parity, parity, match)
		}
	}
}