| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  |  * Minio Cloud Storage, (C) 2016 Minio, Inc. | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Licensed under the Apache License, Version 2.0 (the "License"); | 
					
						
							|  |  |  |  * you may not use this file except in compliance with the License. | 
					
						
							|  |  |  |  * You may obtain a copy of the License at | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Unless required by applicable law or agreed to in writing, software | 
					
						
							|  |  |  |  * distributed under the License is distributed on an "AS IS" BASIS, | 
					
						
							|  |  |  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
					
						
							|  |  |  |  * See the License for the specific language governing permissions and | 
					
						
							|  |  |  |  * limitations under the License. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-19 07:23:42 +08:00
										 |  |  | package cmd | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
import (
	"bytes"
	"math/rand"
	"reflect"
	"testing"
	"time"

	"github.com/minio/minio/pkg/bpool"
)
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Tests getReadDisks which returns readable disks slice from which we can
 | 
					
						
							|  |  |  | // read parallelly.
 | 
					
						
							|  |  |  | func testGetReadDisks(t *testing.T, xl xlObjects) { | 
					
						
							|  |  |  | 	d := xl.storageDisks | 
					
						
							|  |  |  | 	testCases := []struct { | 
					
						
							|  |  |  | 		index     int          // index argument for getReadDisks
 | 
					
						
							|  |  |  | 		argDisks  []StorageAPI // disks argument for getReadDisks
 | 
					
						
							|  |  |  | 		retDisks  []StorageAPI // disks return value from getReadDisks
 | 
					
						
							|  |  |  | 		nextIndex int          // return value from getReadDisks
 | 
					
						
							|  |  |  | 		err       error        // error return value from getReadDisks
 | 
					
						
							|  |  |  | 	}{ | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 1.
 | 
					
						
							|  |  |  | 		// When all disks are available, should return data disks.
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			0, | 
					
						
							|  |  |  | 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]}, | 
					
						
							|  |  |  | 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, nil, nil, nil, nil, nil, nil}, | 
					
						
							|  |  |  | 			8, | 
					
						
							|  |  |  | 			nil, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 2.
 | 
					
						
							|  |  |  | 		// If a parity disk is down, should return all data disks.
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			0, | 
					
						
							|  |  |  | 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], nil, d[10], d[11], d[12], d[13], d[14], d[15]}, | 
					
						
							|  |  |  | 			[]StorageAPI{d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, nil, nil, nil, nil, nil, nil}, | 
					
						
							|  |  |  | 			8, | 
					
						
							|  |  |  | 			nil, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 3.
 | 
					
						
							|  |  |  | 		// If a data disk is down, should return 7 data and 1 parity.
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			0, | 
					
						
							|  |  |  | 			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			[]StorageAPI{nil, d[1], d[2], d[3], d[4], d[5], d[6], d[7], d[8], nil, nil, nil, nil, nil, nil, nil}, | 
					
						
							|  |  |  | 			9, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			nil, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 4.
 | 
					
						
							|  |  |  | 		// If 7 data disks are down, should return 1 data and 7 parity.
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			0, | 
					
						
							|  |  |  | 			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], nil}, | 
					
						
							|  |  |  | 			15, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			nil, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 5.
 | 
					
						
							|  |  |  | 		// When 2 disks fail during parallelRead, next call to getReadDisks should return 3 disks
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			8, | 
					
						
							|  |  |  | 			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, d[8], d[9], nil, nil, nil, nil, nil, nil}, | 
					
						
							|  |  |  | 			10, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			nil, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 6.
 | 
					
						
							|  |  |  | 		// If 2 disks again fail from the 3 disks returned previously, return next 2 disks
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			11, | 
					
						
							|  |  |  | 			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, d[10], d[11], d[12], d[13], d[14], d[15]}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			[]StorageAPI{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, d[11], nil, nil, nil, nil}, | 
					
						
							|  |  |  | 			12, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			nil, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 		// Test case - 7.
 | 
					
						
							|  |  |  | 		// No more disks are available for read, return error
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 		{ | 
					
						
							|  |  |  | 			13, | 
					
						
							|  |  |  | 			[]StorageAPI{nil, nil, d[2], d[3], d[4], d[5], d[6], d[7], nil, nil, d[10], nil, nil, nil, nil, nil}, | 
					
						
							|  |  |  | 			nil, | 
					
						
							|  |  |  | 			0, | 
					
						
							|  |  |  | 			errXLReadQuorum, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for i, test := range testCases { | 
					
						
							|  |  |  | 		disks, nextIndex, err := getReadDisks(test.argDisks, test.index, xl.dataBlocks) | 
					
						
							| 
									
										
										
										
											2016-08-26 00:39:01 +08:00
										 |  |  | 		if errorCause(err) != test.err { | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			t.Errorf("test-case %d - expected error : %s, got : %s", i+1, test.err, err) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		if test.nextIndex != nextIndex { | 
					
						
							|  |  |  | 			t.Errorf("test-case %d - expected nextIndex: %d, got : %d", i+1, test.nextIndex, nextIndex) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2016-08-16 22:57:14 +08:00
										 |  |  | 		if !reflect.DeepEqual(test.retDisks, disks) { | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			t.Errorf("test-case %d : incorrect disks returned. expected %+v, got %+v", i+1, test.retDisks, disks) | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Test getOrderedDisks which returns ordered slice of disks from their
 | 
					
						
							|  |  |  | // actual distribution.
 | 
					
						
							|  |  |  | func testGetOrderedDisks(t *testing.T, xl xlObjects) { | 
					
						
							|  |  |  | 	disks := xl.storageDisks | 
					
						
							|  |  |  | 	distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15} | 
					
						
							| 
									
										
										
										
											2016-07-15 05:59:01 +08:00
										 |  |  | 	orderedDisks := getOrderedDisks(distribution, disks) | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 	// From the "distribution" above you can notice that:
 | 
					
						
							|  |  |  | 	// 1st data block is in the 9th disk (i.e distribution index 8)
 | 
					
						
							|  |  |  | 	// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
 | 
					
						
							|  |  |  | 	if orderedDisks[0] != disks[8] || | 
					
						
							|  |  |  | 		orderedDisks[1] != disks[7] || | 
					
						
							|  |  |  | 		orderedDisks[2] != disks[9] || | 
					
						
							|  |  |  | 		orderedDisks[3] != disks[6] || | 
					
						
							|  |  |  | 		orderedDisks[4] != disks[10] || | 
					
						
							|  |  |  | 		orderedDisks[5] != disks[5] || | 
					
						
							|  |  |  | 		orderedDisks[6] != disks[11] || | 
					
						
							|  |  |  | 		orderedDisks[7] != disks[4] || | 
					
						
							|  |  |  | 		orderedDisks[8] != disks[12] || | 
					
						
							|  |  |  | 		orderedDisks[9] != disks[3] || | 
					
						
							|  |  |  | 		orderedDisks[10] != disks[13] || | 
					
						
							|  |  |  | 		orderedDisks[11] != disks[2] || | 
					
						
							|  |  |  | 		orderedDisks[12] != disks[14] || | 
					
						
							|  |  |  | 		orderedDisks[13] != disks[1] || | 
					
						
							|  |  |  | 		orderedDisks[14] != disks[15] || | 
					
						
							|  |  |  | 		orderedDisks[15] != disks[0] { | 
					
						
							|  |  |  | 		t.Errorf("getOrderedDisks returned incorrect order.") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Test for isSuccessDataBlocks and isSuccessDecodeBlocks.
 | 
					
						
							|  |  |  | func TestIsSuccessBlocks(t *testing.T) { | 
					
						
							|  |  |  | 	dataBlocks := 8 | 
					
						
							|  |  |  | 	testCases := []struct { | 
					
						
							|  |  |  | 		enBlocks      [][]byte // data and parity blocks.
 | 
					
						
							|  |  |  | 		successData   bool     // expected return value of isSuccessDataBlocks()
 | 
					
						
							|  |  |  | 		successDecode bool     // expected return value of isSuccessDecodeBlocks()
 | 
					
						
							|  |  |  | 	}{ | 
					
						
							|  |  |  | 		{ | 
					
						
							|  |  |  | 			// When all data and partity blocks are available.
 | 
					
						
							|  |  |  | 			[][]byte{ | 
					
						
							| 
									
										
										
										
											2016-07-09 02:05:08 +08:00
										 |  |  | 				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							|  |  |  | 				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			}, | 
					
						
							|  |  |  | 			true, | 
					
						
							|  |  |  | 			true, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 		{ | 
					
						
							|  |  |  | 			// When one data block is not available.
 | 
					
						
							|  |  |  | 			[][]byte{ | 
					
						
							| 
									
										
										
										
											2016-07-09 02:05:08 +08:00
										 |  |  | 				nil, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							|  |  |  | 				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			}, | 
					
						
							|  |  |  | 			false, | 
					
						
							|  |  |  | 			true, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 		{ | 
					
						
							|  |  |  | 			// When one data and all parity are available, enough for reedsolomon.Reconstruct()
 | 
					
						
							|  |  |  | 			[][]byte{ | 
					
						
							| 
									
										
										
										
											2016-07-09 02:05:08 +08:00
										 |  |  | 				nil, nil, nil, nil, nil, nil, nil, {'a'}, | 
					
						
							|  |  |  | 				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			}, | 
					
						
							|  |  |  | 			false, | 
					
						
							|  |  |  | 			true, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 		{ | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			// When all data disks are not available, enough for reedsolomon.Reconstruct()
 | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			[][]byte{ | 
					
						
							|  |  |  | 				nil, nil, nil, nil, nil, nil, nil, nil, | 
					
						
							| 
									
										
										
										
											2016-07-09 02:05:08 +08:00
										 |  |  | 				{'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			}, | 
					
						
							|  |  |  | 			false, | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 			true, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 		{ | 
					
						
							|  |  |  | 			// Not enough disks for reedsolomon.Reconstruct()
 | 
					
						
							|  |  |  | 			[][]byte{ | 
					
						
							|  |  |  | 				nil, nil, nil, nil, nil, nil, nil, nil, | 
					
						
							|  |  |  | 				nil, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, {'a'}, | 
					
						
							|  |  |  | 			}, | 
					
						
							|  |  |  | 			false, | 
					
						
							| 
									
										
										
										
											2016-07-06 02:41:25 +08:00
										 |  |  | 			false, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for i, test := range testCases { | 
					
						
							|  |  |  | 		got := isSuccessDataBlocks(test.enBlocks, dataBlocks) | 
					
						
							|  |  |  | 		if test.successData != got { | 
					
						
							|  |  |  | 			t.Errorf("test-case %d : expected %v got %v", i+1, test.successData, got) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		got = isSuccessDecodeBlocks(test.enBlocks, dataBlocks) | 
					
						
							|  |  |  | 		if test.successDecode != got { | 
					
						
							|  |  |  | 			t.Errorf("test-case %d : expected %v got %v", i+1, test.successDecode, got) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// Wrapper function for testGetReadDisks, testGetOrderedDisks.
// Builds a 16-disk XL object layer on temporary directories and runs both
// sub-tests against it.
func TestErasureReadUtils(t *testing.T) {
	nDisks := 16
	// Create nDisks temporary disk directories for the object layer.
	disks, err := getRandomDisks(nDisks)
	if err != nil {
		t.Fatal(err)
	}
	endpoints, err := parseStorageEndPoints(disks, 0)
	if err != nil {
		t.Fatal(err)
	}
	objLayer, _, err := initObjectLayer(endpoints, nil)
	if err != nil {
		// Clean up the disk directories before bailing out, since the
		// deferred removeRoots below is not registered yet.
		removeRoots(disks)
		t.Fatal(err)
	}
	defer removeRoots(disks)
	// Type-assert to the XL implementation so sub-tests can access
	// storageDisks and dataBlocks directly.
	xl := objLayer.(xlObjects)
	testGetReadDisks(t, xl)
	testGetOrderedDisks(t, xl)
}
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 
 | 
					
						
// ReadDiskDown simulates a faulty disk for ReadFile(). It embeds *posix so
// every other StorageAPI method behaves like a healthy disk; only ReadFile
// is overridden (below) to always fail.
type ReadDiskDown struct {
	*posix
}
					
						
							|  |  |  | 
 | 
					
						
// ReadFile overrides (*posix).ReadFile to unconditionally return
// errFaultyDisk, simulating a disk whose reads always fail.
func (r ReadDiskDown) ReadFile(volume string, path string, offset int64, buf []byte) (n int64, err error) {
	return 0, errFaultyDisk
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func TestErasureReadFileDiskFail(t *testing.T) { | 
					
						
							|  |  |  | 	// Initialize environment needed for the test.
 | 
					
						
							|  |  |  | 	dataBlocks := 7 | 
					
						
							|  |  |  | 	parityBlocks := 7 | 
					
						
							|  |  |  | 	blockSize := int64(blockSizeV1) | 
					
						
							|  |  |  | 	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Error(err) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	defer setup.Remove() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	disks := setup.disks | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Prepare a slice of 1MB with random data.
 | 
					
						
							|  |  |  | 	data := make([]byte, 1*1024*1024) | 
					
						
							|  |  |  | 	length := int64(len(data)) | 
					
						
							|  |  |  | 	_, err = rand.Read(data) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create a test file to read from.
 | 
					
						
							| 
									
										
										
										
											2016-07-28 17:20:34 +08:00
										 |  |  | 	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if size != length { | 
					
						
							|  |  |  | 		t.Errorf("erasureCreateFile returned %d, expected %d", size, length) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-26 05:17:01 +08:00
										 |  |  | 	// create byte pool which will be used by erasureReadFile for
 | 
					
						
							|  |  |  | 	// reading from disks and erasure decoding.
 | 
					
						
							|  |  |  | 	chunkSize := getChunkSize(blockSize, dataBlocks) | 
					
						
							|  |  |  | 	pool := bpool.NewBytePool(chunkSize, len(disks)) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	buf := &bytes.Buffer{} | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Error(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if !bytes.Equal(buf.Bytes(), data) { | 
					
						
							|  |  |  | 		t.Error("Contents of the erasure coded file differs") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// 2 disks down. Read should succeed.
 | 
					
						
							|  |  |  | 	disks[4] = ReadDiskDown{disks[4].(*posix)} | 
					
						
							|  |  |  | 	disks[5] = ReadDiskDown{disks[5].(*posix)} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	buf.Reset() | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Error(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if !bytes.Equal(buf.Bytes(), data) { | 
					
						
							|  |  |  | 		t.Error("Contents of the erasure coded file differs") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// 4 more disks down. 6 disks down in total. Read should succeed.
 | 
					
						
							|  |  |  | 	disks[6] = ReadDiskDown{disks[6].(*posix)} | 
					
						
							|  |  |  | 	disks[8] = ReadDiskDown{disks[8].(*posix)} | 
					
						
							|  |  |  | 	disks[9] = ReadDiskDown{disks[9].(*posix)} | 
					
						
							|  |  |  | 	disks[11] = ReadDiskDown{disks[11].(*posix)} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	buf.Reset() | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Error(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if !bytes.Equal(buf.Bytes(), data) { | 
					
						
							|  |  |  | 		t.Error("Contents of the erasure coded file differs") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 	// 2 more disk down. 8 disks down in total. Read should fail.
 | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	disks[12] = ReadDiskDown{disks[12].(*posix)} | 
					
						
							| 
									
										
										
										
											2016-08-15 16:25:41 +08:00
										 |  |  | 	disks[13] = ReadDiskDown{disks[13].(*posix)} | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	buf.Reset() | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 	_, err = erasureReadFile(buf, disks, "testbucket", "testobject", 0, length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) | 
					
						
							| 
									
										
										
										
											2016-08-26 00:39:01 +08:00
										 |  |  | 	if errorCause(err) != errXLReadQuorum { | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 		t.Fatal("expected errXLReadQuorum error") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func TestErasureReadFileOffsetLength(t *testing.T) { | 
					
						
							|  |  |  | 	// Initialize environment needed for the test.
 | 
					
						
							|  |  |  | 	dataBlocks := 7 | 
					
						
							|  |  |  | 	parityBlocks := 7 | 
					
						
							|  |  |  | 	blockSize := int64(1 * 1024 * 1024) | 
					
						
							|  |  |  | 	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Error(err) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	defer setup.Remove() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	disks := setup.disks | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 	// Prepare a slice of 5MB with random data.
 | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	data := make([]byte, 5*1024*1024) | 
					
						
							|  |  |  | 	length := int64(len(data)) | 
					
						
							|  |  |  | 	_, err = rand.Read(data) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create a test file to read from.
 | 
					
						
							| 
									
										
										
										
											2016-07-28 17:20:34 +08:00
										 |  |  | 	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if size != length { | 
					
						
							|  |  |  | 		t.Errorf("erasureCreateFile returned %d, expected %d", size, length) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	testCases := []struct { | 
					
						
							|  |  |  | 		offset, length int64 | 
					
						
							|  |  |  | 	}{ | 
					
						
							|  |  |  | 		// Full file.
 | 
					
						
							|  |  |  | 		{0, length}, | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 		// Read nothing.
 | 
					
						
							|  |  |  | 		{length, 0}, | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 		// 2nd block.
 | 
					
						
							|  |  |  | 		{blockSize, blockSize}, | 
					
						
							|  |  |  | 		// Test cases for random offsets and lengths.
 | 
					
						
							|  |  |  | 		{blockSize - 1, 2}, | 
					
						
							|  |  |  | 		{blockSize - 1, blockSize + 1}, | 
					
						
							|  |  |  | 		{blockSize + 1, blockSize - 1}, | 
					
						
							|  |  |  | 		{blockSize + 1, blockSize}, | 
					
						
							|  |  |  | 		{blockSize + 1, blockSize + 1}, | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 		{blockSize*2 - 1, blockSize + 1}, | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 		{length - 1, 1}, | 
					
						
							|  |  |  | 		{length - blockSize, blockSize}, | 
					
						
							|  |  |  | 		{length - blockSize - 1, blockSize}, | 
					
						
							|  |  |  | 		{length - blockSize - 1, blockSize + 1}, | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-07-26 05:17:01 +08:00
										 |  |  | 	chunkSize := getChunkSize(blockSize, dataBlocks) | 
					
						
							|  |  |  | 	pool := bpool.NewBytePool(chunkSize, len(disks)) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 	// Compare the data read from file with "data" byte array.
 | 
					
						
							|  |  |  | 	for i, testCase := range testCases { | 
					
						
							|  |  |  | 		expected := data[testCase.offset:(testCase.offset + testCase.length)] | 
					
						
							|  |  |  | 		buf := &bytes.Buffer{} | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", testCase.offset, testCase.length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) | 
					
						
							| 
									
										
										
										
											2016-07-19 14:56:16 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Error(err) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		got := buf.Bytes() | 
					
						
							|  |  |  | 		if !bytes.Equal(expected, got) { | 
					
						
							|  |  |  | 			t.Errorf("Test %d : read data is different from what was expected", i+1) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // Test erasureReadFile with random offset and lengths.
 | 
					
						
							|  |  |  | // This test is t.Skip()ed as it a long time to run, hence should be run
 | 
					
						
							|  |  |  | // explicitly after commenting out t.Skip()
 | 
					
						
							|  |  |  | func TestErasureReadFileRandomOffsetLength(t *testing.T) { | 
					
						
							|  |  |  | 	// Comment the following line to run this test.
 | 
					
						
							|  |  |  | 	t.SkipNow() | 
					
						
							|  |  |  | 	// Initialize environment needed for the test.
 | 
					
						
							|  |  |  | 	dataBlocks := 7 | 
					
						
							|  |  |  | 	parityBlocks := 7 | 
					
						
							|  |  |  | 	blockSize := int64(1 * 1024 * 1024) | 
					
						
							|  |  |  | 	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Error(err) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	defer setup.Remove() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	disks := setup.disks | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Prepare a slice of 5MB with random data.
 | 
					
						
							|  |  |  | 	data := make([]byte, 5*1024*1024) | 
					
						
							|  |  |  | 	length := int64(len(data)) | 
					
						
							|  |  |  | 	_, err = rand.Read(data) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// 10000 iterations with random offsets and lengths.
 | 
					
						
							|  |  |  | 	iterations := 10000 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create a test file to read from.
 | 
					
						
							| 
									
										
										
										
											2016-07-28 17:20:34 +08:00
										 |  |  | 	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1) | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if size != length { | 
					
						
							|  |  |  | 		t.Errorf("erasureCreateFile returned %d, expected %d", size, length) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// To generate random offset/length.
 | 
					
						
							| 
									
										
										
										
											2016-09-20 04:14:55 +08:00
										 |  |  | 	r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-26 05:17:01 +08:00
										 |  |  | 	// create pool buffer which will be used by erasureReadFile for
 | 
					
						
							|  |  |  | 	// reading from disks and erasure decoding.
 | 
					
						
							|  |  |  | 	chunkSize := getChunkSize(blockSize, dataBlocks) | 
					
						
							|  |  |  | 	pool := bpool.NewBytePool(chunkSize, len(disks)) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 	buf := &bytes.Buffer{} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Verify erasureReadFile() for random offsets and lengths.
 | 
					
						
							|  |  |  | 	for i := 0; i < iterations; i++ { | 
					
						
							|  |  |  | 		offset := r.Int63n(length) | 
					
						
							|  |  |  | 		readLen := r.Int63n(length - offset) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		expected := data[offset : offset+readLen] | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", offset, readLen, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool) | 
					
						
							| 
									
										
										
										
											2016-07-20 16:30:30 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatal(err, offset, readLen) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		got := buf.Bytes() | 
					
						
							|  |  |  | 		if !bytes.Equal(expected, got) { | 
					
						
							|  |  |  | 			t.Fatalf("read data is different from what was expected, offset=%d length=%d", offset, readLen) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		buf.Reset() | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } |