| 
									
										
										
										
											2021-04-19 03:41:13 +08:00
										 |  |  | // Copyright (c) 2015-2021 MinIO, Inc.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This file is part of MinIO Object Storage stack
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is free software: you can redistribute it and/or modify
 | 
					
						
							|  |  |  | // it under the terms of the GNU Affero General Public License as published by
 | 
					
						
							|  |  |  | // the Free Software Foundation, either version 3 of the License, or
 | 
					
						
							|  |  |  | // (at your option) any later version.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is distributed in the hope that it will be useful
 | 
					
						
							|  |  |  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
					
						
							|  |  |  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
					
						
							|  |  |  | // GNU Affero General Public License for more details.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // You should have received a copy of the GNU Affero General Public License
 | 
					
						
							|  |  |  | // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-19 07:23:42 +08:00
										 |  |  | package cmd | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							|  |  |  | 	"bytes" | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 	"crypto/md5" | 
					
						
							| 
									
										
										
										
											2021-01-08 11:27:31 +08:00
										 |  |  | 	crand "crypto/rand" | 
					
						
							| 
									
										
										
										
											2020-11-24 01:12:17 +08:00
										 |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 	"fmt" | 
					
						
							| 
									
										
										
										
											2021-01-08 11:27:31 +08:00
										 |  |  | 	"io" | 
					
						
							| 
									
										
										
										
											2016-10-09 08:08:17 +08:00
										 |  |  | 	"os" | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 	"path/filepath" | 
					
						
							| 
									
										
										
										
											2022-11-07 16:11:21 +08:00
										 |  |  | 	"runtime" | 
					
						
							| 
									
										
										
										
											2021-03-30 08:00:55 +08:00
										 |  |  | 	"strconv" | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	"testing" | 
					
						
							| 
									
										
										
										
											2016-11-23 10:18:22 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-08-30 07:57:16 +08:00
										 |  |  | 	"github.com/dustin/go-humanize" | 
					
						
							| 
									
										
										
										
											2021-06-02 05:59:40 +08:00
										 |  |  | 	"github.com/minio/minio/internal/config/storageclass" | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func TestRepeatPutObjectPart(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	var objLayer ObjectLayer | 
					
						
							|  |  |  | 	var disks []string | 
					
						
							|  |  |  | 	var err error | 
					
						
							| 
									
										
										
										
											2018-09-11 00:42:43 +08:00
										 |  |  | 	var opts ObjectOptions | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	objLayer, disks, err = prepareErasure16(ctx) | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// cleaning up of temporary test directories
 | 
					
						
							| 
									
										
										
										
											2020-09-11 00:18:19 +08:00
										 |  |  | 	defer objLayer.Shutdown(context.Background()) | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	defer removeRoots(disks) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = objLayer.MakeBucket(ctx, "bucket1", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-08-30 07:57:16 +08:00
										 |  |  | 	res, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts) | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-11-23 10:18:22 +08:00
										 |  |  | 	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte) | 
					
						
							| 
									
										
										
										
											2016-11-22 05:51:05 +08:00
										 |  |  | 	md5Hex := getMD5Hash(fiveMBBytes) | 
					
						
							| 
									
										
										
										
											2022-08-30 07:57:16 +08:00
										 |  |  | 	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts) | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
 | 
					
						
							| 
									
										
										
										
											2022-08-30 07:57:16 +08:00
										 |  |  | 	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts) | 
					
						
							| 
									
										
										
										
											2016-06-22 06:48:27 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func TestErasureDeleteObjectBasic(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	testCases := []struct { | 
					
						
							|  |  |  | 		bucket      string | 
					
						
							|  |  |  | 		object      string | 
					
						
							|  |  |  | 		expectedErr error | 
					
						
							|  |  |  | 	}{ | 
					
						
							| 
									
										
										
										
											2018-09-01 04:16:35 +08:00
										 |  |  | 		{".test", "dir/obj", BucketNameInvalid{Bucket: ".test"}}, | 
					
						
							|  |  |  | 		{"----", "dir/obj", BucketNameInvalid{Bucket: "----"}}, | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 		{"bucket", "", ObjectNameInvalid{Bucket: "bucket", Object: ""}}, | 
					
						
							|  |  |  | 		{"bucket", "doesnotexist", ObjectNotFound{Bucket: "bucket", Object: "doesnotexist"}}, | 
					
						
							| 
									
										
										
										
											2018-09-01 04:16:35 +08:00
										 |  |  | 		{"bucket", "dir/doesnotexist", ObjectNotFound{Bucket: "bucket", Object: "dir/doesnotexist"}}, | 
					
						
							|  |  |  | 		{"bucket", "dir", ObjectNotFound{Bucket: "bucket", Object: "dir"}}, | 
					
						
							|  |  |  | 		{"bucket", "dir/", ObjectNotFound{Bucket: "bucket", Object: "dir/"}}, | 
					
						
							|  |  |  | 		{"bucket", "dir/obj", nil}, | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	// Create an instance of xl backend
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	xl, fsDirs, err := prepareErasure16(ctx) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-09-11 00:18:19 +08:00
										 |  |  | 	defer xl.Shutdown(context.Background()) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = xl.MakeBucket(ctx, "bucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-09-01 04:16:35 +08:00
										 |  |  | 	// Create object "dir/obj" under bucket "bucket" for Test 7 to pass
 | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	_, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		t.Fatalf("Erasure Object upload failed: <ERROR> %s", err) | 
					
						
							| 
									
										
										
										
											2016-08-12 15:26:30 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	for _, test := range testCases { | 
					
						
							|  |  |  | 		test := test | 
					
						
							|  |  |  | 		t.Run("", func(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2021-03-30 08:00:55 +08:00
										 |  |  | 			_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{}) | 
					
						
							|  |  |  | 			if err != nil { | 
					
						
							|  |  |  | 				t.Fatal("dir/obj not found before last test") | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			_, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{}) | 
					
						
							|  |  |  | 			if test.expectedErr != nil && actualErr != test.expectedErr { | 
					
						
							|  |  |  | 				t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if test.expectedErr == nil && actualErr != nil { | 
					
						
							|  |  |  | 				t.Errorf("Expected to pass, but failed with %s", actualErr) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		}) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	// Cleanup backend directories
 | 
					
						
							|  |  |  | 	removeRoots(fsDirs) | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-03-05 12:01:26 +08:00
										 |  |  | func TestDeleteObjectsVersioned(t *testing.T) { | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasure(ctx, 16) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal("Unable to initialize 'Erasure' object layer.", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// Remove all dirs.
 | 
					
						
							|  |  |  | 	for _, dir := range fsDirs { | 
					
						
							|  |  |  | 		defer os.RemoveAll(dir) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	type testCaseType struct { | 
					
						
							|  |  |  | 		bucket string | 
					
						
							|  |  |  | 		object string | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucketName := "bucket" | 
					
						
							|  |  |  | 	testCases := []testCaseType{ | 
					
						
							|  |  |  | 		{bucketName, "dir/obj1"}, | 
					
						
							|  |  |  | 		{bucketName, "dir/obj1"}, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, bucketName, MakeBucketOptions{ | 
					
						
							| 
									
										
										
										
											2022-03-05 12:01:26 +08:00
										 |  |  | 		VersioningEnabled: true, | 
					
						
							|  |  |  | 	}) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	names := make([]ObjectToDelete, len(testCases)) | 
					
						
							|  |  |  | 	for i, testCase := range testCases { | 
					
						
							|  |  |  | 		objInfo, err := obj.PutObject(ctx, testCase.bucket, testCase.object, | 
					
						
							|  |  |  | 			mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{ | 
					
						
							|  |  |  | 				Versioned: true, | 
					
						
							|  |  |  | 			}) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatalf("Erasure Object upload failed: <ERROR> %s", err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		names[i] = ObjectToDelete{ | 
					
						
							|  |  |  | 			ObjectV: ObjectV{ | 
					
						
							|  |  |  | 				ObjectName: objInfo.Name, | 
					
						
							|  |  |  | 				VersionID:  objInfo.VersionID, | 
					
						
							|  |  |  | 			}, | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	names = append(names, ObjectToDelete{ | 
					
						
							|  |  |  | 		ObjectV: ObjectV{ | 
					
						
							|  |  |  | 			ObjectName: "dir/obj1", | 
					
						
							|  |  |  | 			VersionID:  mustGetUUID(), // add a non-existent UUID.
 | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	}) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	_, delErrs := obj.DeleteObjects(ctx, bucketName, names, ObjectOptions{ | 
					
						
							|  |  |  | 		Versioned: true, | 
					
						
							|  |  |  | 	}) | 
					
						
							|  |  |  | 	for i := range delErrs { | 
					
						
							|  |  |  | 		if delErrs[i] != nil { | 
					
						
							|  |  |  | 			t.Errorf("Failed to remove object `%v` with the error: `%v`", names[i], delErrs[i]) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for i, test := range testCases { | 
					
						
							|  |  |  | 		_, statErr := obj.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{ | 
					
						
							|  |  |  | 			VersionID: names[i].ObjectV.VersionID, | 
					
						
							|  |  |  | 		}) | 
					
						
							|  |  |  | 		switch statErr.(type) { | 
					
						
							|  |  |  | 		case VersionNotFound: | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 			t.Fatalf("Object %s is not removed", test.bucket+SlashSeparator+test.object) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-09-20 02:05:16 +08:00
										 |  |  | 	if _, err = os.ReadFile(pathJoin(fsDirs[0], bucketName, "dir/obj1", "xl.meta")); err == nil { | 
					
						
							| 
									
										
										
										
											2022-03-05 12:01:26 +08:00
										 |  |  | 		t.Fatalf("xl.meta still present after removal") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func TestErasureDeleteObjectsErasureSet(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasureSets32(ctx) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal("Unable to initialize 'Erasure' object layer.", err) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 	setObjectLayer(obj) | 
					
						
							|  |  |  | 	initConfigSubsystem(ctx, obj) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Remove all dirs.
 | 
					
						
							|  |  |  | 	for _, dir := range fsDirs { | 
					
						
							|  |  |  | 		defer os.RemoveAll(dir) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	type testCaseType struct { | 
					
						
							|  |  |  | 		bucket string | 
					
						
							|  |  |  | 		object string | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucketName := "bucket" | 
					
						
							|  |  |  | 	testCases := []testCaseType{ | 
					
						
							|  |  |  | 		{bucketName, "dir/obj1"}, | 
					
						
							|  |  |  | 		{bucketName, "dir/obj2"}, | 
					
						
							|  |  |  | 		{bucketName, "obj3"}, | 
					
						
							|  |  |  | 		{bucketName, "obj_4"}, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 	if err = obj.MakeBucket(ctx, bucketName, MakeBucketOptions{}); err != nil { | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for _, testCase := range testCases { | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 		_, err = obj.PutObject(ctx, testCase.bucket, testCase.object, | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 			mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			t.Fatalf("Erasure Object upload failed: <ERROR> %s", err) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	toObjectNames := func(testCases []testCaseType) []ObjectToDelete { | 
					
						
							|  |  |  | 		names := make([]ObjectToDelete, len(testCases)) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		for i := range testCases { | 
					
						
							| 
									
										
										
										
											2022-01-03 17:28:52 +08:00
										 |  |  | 			names[i] = ObjectToDelete{ | 
					
						
							|  |  |  | 				ObjectV: ObjectV{ | 
					
						
							|  |  |  | 					ObjectName: testCases[i].object, | 
					
						
							|  |  |  | 				}, | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		return names | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	objectNames := toObjectNames(testCases) | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 	_, delErrs := obj.DeleteObjects(ctx, bucketName, objectNames, ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	for i := range delErrs { | 
					
						
							|  |  |  | 		if delErrs[i] != nil { | 
					
						
							|  |  |  | 			t.Errorf("Failed to remove object `%v` with the error: `%v`", objectNames[i], delErrs[i]) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for _, test := range testCases { | 
					
						
							| 
									
										
										
										
											2023-01-04 00:16:39 +08:00
										 |  |  | 		_, statErr := obj.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		switch statErr.(type) { | 
					
						
							|  |  |  | 		case ObjectNotFound: | 
					
						
							|  |  |  | 		default: | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | 			t.Fatalf("Object %s is not removed", test.bucket+SlashSeparator+test.object) | 
					
						
							| 
									
										
										
										
											2019-05-14 03:25:49 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | func TestErasureDeleteObjectDiskNotFound(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	// Create an instance of xl backend.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	obj, fsDirs, err := prepareErasure16(ctx) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	// Cleanup backend directories
 | 
					
						
							| 
									
										
										
										
											2020-09-11 00:18:19 +08:00
										 |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	defer removeRoots(fsDirs) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	xl := z.serverPools[0].sets[0] | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Create "bucket"
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucket := "bucket" | 
					
						
							|  |  |  | 	object := "object" | 
					
						
							| 
									
										
										
										
											2018-09-11 00:42:43 +08:00
										 |  |  | 	opts := ObjectOptions{} | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	// Create object "obj" under bucket "bucket".
 | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) | 
					
						
							| 
									
										
										
										
											2016-09-02 14:10:50 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-05-28 02:38:09 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	erasureDisks := xl.getDisks() | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	xl.getDisks = func() []StorageAPI { | 
					
						
							| 
									
										
										
										
											2021-05-28 02:38:09 +08:00
										 |  |  | 		for i := range erasureDisks[:6] { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk) | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		return erasureDisks | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2021-05-28 02:38:09 +08:00
										 |  |  | 	if !errors.Is(err, errErasureWriteQuorum) { | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "obj" under "bucket".
 | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-28 02:38:09 +08:00
										 |  |  | 	// Remove one more disk to 'lose' quorum, by taking 2 more drives offline.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	erasureDisks = xl.getDisks() | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	xl.getDisks = func() []StorageAPI { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		erasureDisks[7] = nil | 
					
						
							|  |  |  | 		erasureDisks[8] = nil | 
					
						
							|  |  |  | 		return erasureDisks | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) | 
					
						
							|  |  |  | 	// since majority of disks are not available, metaquorum is not achieved and hence errErasureWriteQuorum error
 | 
					
						
							| 
									
										
										
										
											2020-11-24 01:12:17 +08:00
										 |  |  | 	if !errors.Is(err, errErasureWriteQuorum) { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-28 02:38:09 +08:00
// TestErasureDeleteObjectDiskNotFoundErasure4 verifies that DeleteObject fails
// with errErasureWriteQuorum once more than half of the erasure set's drives
// (5 out of a 16-drive set after EC upgrade) return faulty-disk errors.
func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Cleanup backend directories
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	// Reach into the first erasure set of the first pool so the test can
	// swap out its disk-provider function below.
	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}
	// Create object "obj" under bucket "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}
	// Sanity check: with all disks healthy, deleting the object must succeed.
	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Create "obj" under "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// 'Lose' write quorum by wrapping the first 5 disks in naughtyDisks that
	// always return errFaultyDisk.
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:5] {
			erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk)
		}
		return erasureDisks
	}

	z.serverPools[0].erasureDisksMu.Unlock()
	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
	// since majority of disks are not available, metaquorum is not achieved and hence errErasureWriteQuorum error
	if !errors.Is(err, errErasureWriteQuorum) {
		t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
	}
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) { | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create an instance of xl backend.
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasure16(ctx) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// Cleanup backend directories
 | 
					
						
							|  |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							|  |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	xl := z.serverPools[0].sets[0] | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "bucket"
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2021-05-28 02:38:09 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucket := "bucket" | 
					
						
							|  |  |  | 	object := "object" | 
					
						
							|  |  |  | 	opts := ObjectOptions{} | 
					
						
							|  |  |  | 	// Create object "obj" under bucket "bucket".
 | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// for a 16 disk setup, EC is 4, but will be upgraded up to 8.
 | 
					
						
							|  |  |  | 	// Remove 4 disks.
 | 
					
						
							|  |  |  | 	erasureDisks := xl.getDisks() | 
					
						
							|  |  |  | 	z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							|  |  |  | 	xl.getDisks = func() []StorageAPI { | 
					
						
							|  |  |  | 		for i := range erasureDisks[:4] { | 
					
						
							|  |  |  | 			erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		return erasureDisks | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							|  |  |  | 	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "obj" under "bucket".
 | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Object was uploaded with 4 known bad drives, so we should still be able to lose 3 drives and still write to the object.
 | 
					
						
							|  |  |  | 	erasureDisks = xl.getDisks() | 
					
						
							|  |  |  | 	z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							|  |  |  | 	xl.getDisks = func() []StorageAPI { | 
					
						
							|  |  |  | 		erasureDisks[7] = nil | 
					
						
							|  |  |  | 		erasureDisks[8] = nil | 
					
						
							|  |  |  | 		erasureDisks[9] = nil | 
					
						
							|  |  |  | 		return erasureDisks | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							|  |  |  | 	_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{}) | 
					
						
							|  |  |  | 	// since majority of disks are available, metaquorum achieved.
 | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Errorf("Expected deleteObject to not fail, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
// TestGetObjectNoQuorum verifies GetObjectNInfo behavior when read quorum is
// lost in two ways: (1) all xl.meta files survive but every data part is
// deleted from disk, and (2) 9 of 16 drives are replaced by naughtyDisks that
// start failing after a small number of successful StorageAPI calls.
func TestGetObjectNoQuorum(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create an instance of xl backend.
	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Cleanup backend directories.
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	// First erasure set of the first pool; its getDisks is overridden below.
	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	// Create "bucket"
	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"
	opts := ObjectOptions{}
	// Large random payload (16x the small-file threshold) so the object data
	// is stored in separate part files rather than inlined in xl.meta.
	buf := make([]byte, smallFileThreshold*16)
	if _, err = io.ReadFull(crand.Reader, buf); err != nil {
		t.Fatal(err)
	}

	// Test use case 1: All disks are online, xl.meta are present, but data are missing
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(buf), int64(len(buf)), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// Delete everything under the object directory except xl.meta on every
	// disk, simulating lost data parts. Deletion errors are deliberately
	// ignored — best-effort corruption is all the test needs.
	for _, disk := range xl.getDisks() {
		files, _ := disk.ListDir(ctx, bucket, object, -1)
		for _, file := range files {
			if file != "xl.meta" {
				disk.Delete(ctx, bucket, pathJoin(object, file), DeleteOptions{
					Recursive: true,
					Immediate: false,
				})
			}
		}
	}

	// The read-quorum failure may surface either when opening the stream or
	// only once the body is actually read, so both paths are checked.
	gr, err := xl.GetObjectNInfo(ctx, bucket, object, nil, nil, opts)
	if err != nil {
		if err != toObjectErr(errErasureReadQuorum, bucket, object) {
			t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
		}
	}
	if gr != nil {
		_, err = io.Copy(io.Discard, gr)
		if err != toObjectErr(errErasureReadQuorum, bucket, object) {
			t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
		}
		gr.Close()
	}

	// Test use case 2: Make 9 disks offline, which leaves less than quorum number of disks
	// in a 16 disk Erasure setup. The original disks are 'replaced' with
	// naughtyDisks that fail after 'f' successful StorageAPI method
	// invocations, where f - [0,2)

	// Create "object" under "bucket".
	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(buf), int64(len(buf)), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	for f := 0; f < 2; f++ {
		// diskErrors maps call-index -> error; a nil entry means that call
		// succeeds, so each naughtyDisk allows f+1 calls before failing.
		diskErrors := make(map[int]error)
		for i := 0; i <= f; i++ {
			diskErrors[i] = nil
		}
		erasureDisks := xl.getDisks()
		for i := range erasureDisks[:9] {
			// On later iterations the disk may already be a naughtyDisk;
			// rewrap the underlying disk rather than stacking wrappers.
			switch diskType := erasureDisks[i].(type) {
			case *naughtyDisk:
				erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
			default:
				erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
			}
		}
		z.serverPools[0].erasureDisksMu.Lock()
		xl.getDisks = func() []StorageAPI {
			return erasureDisks
		}
		z.serverPools[0].erasureDisksMu.Unlock()
		// Fetch object from store.
		gr, err := xl.GetObjectNInfo(ctx, bucket, object, nil, nil, opts)
		if err != nil {
			if err != toObjectErr(errErasureReadQuorum, bucket, object) {
				t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
			}
		}
		if gr != nil {
			_, err = io.Copy(io.Discard, gr)
			if err != toObjectErr(errErasureReadQuorum, bucket, object) {
				t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
			}
			gr.Close()
		}
	}
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func TestHeadObjectNoQuorum(t *testing.T) { | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create an instance of xl backend.
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasure16(ctx) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// Cleanup backend directories.
 | 
					
						
							|  |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							|  |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	xl := z.serverPools[0].sets[0] | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "bucket"
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2021-01-03 02:35:57 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucket := "bucket" | 
					
						
							|  |  |  | 	object := "object" | 
					
						
							|  |  |  | 	opts := ObjectOptions{} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Test use case 1: All disks are online, xl.meta are present, but data are missing
 | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	for _, disk := range xl.getDisks() { | 
					
						
							|  |  |  | 		files, _ := disk.ListDir(ctx, bucket, object, -1) | 
					
						
							|  |  |  | 		for _, file := range files { | 
					
						
							|  |  |  | 			if file != "xl.meta" { | 
					
						
							| 
									
										
										
										
											2022-07-12 00:15:54 +08:00
										 |  |  | 				disk.Delete(ctx, bucket, pathJoin(object, file), DeleteOptions{ | 
					
						
							|  |  |  | 					Recursive: true, | 
					
						
							| 
									
										
										
										
											2023-11-29 14:35:16 +08:00
										 |  |  | 					Immediate: false, | 
					
						
							| 
									
										
										
										
											2022-07-12 00:15:54 +08:00
										 |  |  | 				}) | 
					
						
							| 
									
										
										
										
											2021-01-03 02:35:57 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-01-03 02:35:57 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	_, err = xl.GetObjectInfo(ctx, bucket, object, opts) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Errorf("Expected StatObject to succeed if data dir are not found, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Test use case 2: Make 9 disks offline, which leaves less than quorum number of disks
 | 
					
						
							|  |  |  | 	// in a 16 disk Erasure setup. The original disks are 'replaced' with
 | 
					
						
							|  |  |  | 	// naughtyDisks that fail after 'f' successful StorageAPI method
 | 
					
						
							|  |  |  | 	// invocations, where f - [0,2)
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "object" under "bucket".
 | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	erasureDisks := xl.getDisks() | 
					
						
							|  |  |  | 	for i := range erasureDisks[:10] { | 
					
						
							|  |  |  | 		erasureDisks[i] = nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							|  |  |  | 	xl.getDisks = func() []StorageAPI { | 
					
						
							|  |  |  | 		return erasureDisks | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Fetch object from store.
 | 
					
						
							|  |  |  | 	_, err = xl.GetObjectInfo(ctx, bucket, object, opts) | 
					
						
							|  |  |  | 	if err != toObjectErr(errErasureReadQuorum, bucket, object) { | 
					
						
							|  |  |  | 		t.Errorf("Expected getObjectInfo to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func TestPutObjectNoQuorum(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2020-04-15 08:52:38 +08:00
										 |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	// Create an instance of xl backend.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	obj, fsDirs, err := prepareErasure16(ctx) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	// Cleanup backend directories.
 | 
					
						
							| 
									
										
										
										
											2020-09-11 00:18:19 +08:00
										 |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	xl := z.serverPools[0].sets[0] | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Create "bucket"
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucket := "bucket" | 
					
						
							|  |  |  | 	object := "object" | 
					
						
							| 
									
										
										
										
											2018-09-11 00:42:43 +08:00
										 |  |  | 	opts := ObjectOptions{} | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	// Create "object" under "bucket".
 | 
					
						
							| 
									
										
										
										
											2021-04-01 00:19:14 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{'a'}, smallFileThreshold*16)), smallFileThreshold*16, "", ""), opts) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Make 9 disks offline, which leaves less than quorum number of disks
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	// in a 16 disk Erasure setup. The original disks are 'replaced' with
 | 
					
						
							| 
									
										
										
										
											2016-09-10 03:53:09 +08:00
										 |  |  | 	// naughtyDisks that fail after 'f' successful StorageAPI method
 | 
					
						
							| 
									
										
										
										
											2021-03-30 08:00:55 +08:00
										 |  |  | 	// invocations, where f - [0,4)
 | 
					
						
							| 
									
										
										
										
											2021-04-21 01:44:39 +08:00
										 |  |  | 	for f := 0; f < 2; f++ { | 
					
						
							| 
									
										
										
										
											2016-09-10 03:53:09 +08:00
										 |  |  | 		diskErrors := make(map[int]error) | 
					
						
							|  |  |  | 		for i := 0; i <= f; i++ { | 
					
						
							|  |  |  | 			diskErrors[i] = nil | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		erasureDisks := xl.getDisks() | 
					
						
							|  |  |  | 		for i := range erasureDisks[:9] { | 
					
						
							|  |  |  | 			switch diskType := erasureDisks[i].(type) { | 
					
						
							| 
									
										
										
										
											2016-09-10 03:53:09 +08:00
										 |  |  | 			case *naughtyDisk: | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 				erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) | 
					
						
							| 
									
										
										
										
											2018-02-16 09:45:57 +08:00
										 |  |  | 			default: | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 				erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 		z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 		xl.getDisks = func() []StorageAPI { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			return erasureDisks | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 		z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 		// Upload new content to same object "object"
 | 
					
						
							| 
									
										
										
										
											2021-04-01 00:19:14 +08:00
										 |  |  | 		_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{byte(f)}, smallFileThreshold*16)), smallFileThreshold*16, "", ""), opts) | 
					
						
							| 
									
										
										
										
											2020-11-24 01:12:17 +08:00
										 |  |  | 		if !errors.Is(err, errErasureWriteQuorum) { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) | 
					
						
							| 
									
										
										
										
											2016-07-22 04:15:54 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2016-10-09 08:08:17 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-03-30 08:00:55 +08:00
										 |  |  | func TestPutObjectNoQuorumSmall(t *testing.T) { | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create an instance of xl backend.
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasure16(ctx) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Cleanup backend directories.
 | 
					
						
							|  |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							|  |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	xl := z.serverPools[0].sets[0] | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "bucket"
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, "bucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2021-03-30 08:00:55 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucket := "bucket" | 
					
						
							|  |  |  | 	object := "object" | 
					
						
							|  |  |  | 	opts := ObjectOptions{} | 
					
						
							|  |  |  | 	// Create "object" under "bucket".
 | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{'a'}, smallFileThreshold/2)), smallFileThreshold/2, "", ""), opts) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Make 9 disks offline, which leaves less than quorum number of disks
 | 
					
						
							|  |  |  | 	// in a 16 disk Erasure setup. The original disks are 'replaced' with
 | 
					
						
							|  |  |  | 	// naughtyDisks that fail after 'f' successful StorageAPI method
 | 
					
						
							|  |  |  | 	// invocations, where f - [0,2)
 | 
					
						
							|  |  |  | 	for f := 0; f < 2; f++ { | 
					
						
							|  |  |  | 		t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) { | 
					
						
							|  |  |  | 			diskErrors := make(map[int]error) | 
					
						
							|  |  |  | 			for i := 0; i <= f; i++ { | 
					
						
							|  |  |  | 				diskErrors[i] = nil | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			erasureDisks := xl.getDisks() | 
					
						
							|  |  |  | 			for i := range erasureDisks[:9] { | 
					
						
							|  |  |  | 				switch diskType := erasureDisks[i].(type) { | 
					
						
							|  |  |  | 				case *naughtyDisk: | 
					
						
							|  |  |  | 					erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) | 
					
						
							|  |  |  | 				default: | 
					
						
							|  |  |  | 					erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			z.serverPools[0].erasureDisksMu.Lock() | 
					
						
							|  |  |  | 			xl.getDisks = func() []StorageAPI { | 
					
						
							|  |  |  | 				return erasureDisks | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			z.serverPools[0].erasureDisksMu.Unlock() | 
					
						
							|  |  |  | 			// Upload new content to same object "object"
 | 
					
						
							|  |  |  | 			_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bytes.Repeat([]byte{byte(f)}, smallFileThreshold/2)), smallFileThreshold/2, "", ""), opts) | 
					
						
							|  |  |  | 			if !errors.Is(err, errErasureWriteQuorum) { | 
					
						
							|  |  |  | 				t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		}) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-26 23:09:23 +08:00
										 |  |  | // Test PutObject twice, one small and another bigger
 | 
					
						
							|  |  |  | // than small data thresold and checks reading them again
 | 
					
						
							|  |  |  | func TestPutObjectSmallInlineData(t *testing.T) { | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	const numberOfDisks = 4 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create an instance of xl backend.
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasure(ctx, numberOfDisks) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Cleanup backend directories.
 | 
					
						
							|  |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							|  |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	bucket := "bucket" | 
					
						
							|  |  |  | 	object := "object" | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create "bucket"
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2021-05-26 23:09:23 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Test: Upload a small file and read it.
 | 
					
						
							|  |  |  | 	smallData := []byte{'a'} | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(smallData), int64(len(smallData)), "", ""), ObjectOptions{}) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2023-04-18 03:16:37 +08:00
										 |  |  | 	gr, err := obj.GetObjectNInfo(ctx, bucket, object, nil, nil, ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2021-05-26 23:09:23 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Expected GetObject to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	output := bytes.NewBuffer([]byte{}) | 
					
						
							|  |  |  | 	_, err = io.Copy(output, gr) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Expected GetObject reading data to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	gr.Close() | 
					
						
							|  |  |  | 	if !bytes.Equal(output.Bytes(), smallData) { | 
					
						
							|  |  |  | 		t.Fatalf("Corrupted data is found") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Test: Upload a file bigger than the small file threshold
 | 
					
						
							|  |  |  | 	// under the same bucket & key name and try to read it again.
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	output.Reset() | 
					
						
							|  |  |  | 	bigData := bytes.Repeat([]byte{'b'}, smallFileThreshold*numberOfDisks/2) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(bigData), int64(len(bigData)), "", ""), ObjectOptions{}) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2023-04-18 03:16:37 +08:00
										 |  |  | 	gr, err = obj.GetObjectNInfo(ctx, bucket, object, nil, nil, ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2021-05-26 23:09:23 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Expected GetObject to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	_, err = io.Copy(output, gr) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Expected GetObject reading data to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	gr.Close() | 
					
						
							|  |  |  | 	if !bytes.Equal(output.Bytes(), bigData) { | 
					
						
							|  |  |  | 		t.Fatalf("Corrupted data found") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
// TestObjectQuorumFromMeta wires testObjectQuorumFromMeta into the
// standard object-layer test harness (ExecObjectLayerTestWithDirs).
func TestObjectQuorumFromMeta(t *testing.T) {
	ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta)
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) { | 
					
						
							|  |  |  | 	bucket := getRandomBucketName() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	var opts ObjectOptions | 
					
						
							|  |  |  | 	// make data with more than one part
 | 
					
						
							|  |  |  | 	partCount := 3 | 
					
						
							| 
									
										
										
										
											2019-11-20 09:42:27 +08:00
										 |  |  | 	data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	xl := z.serverPools[0].sets[0] | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	erasureDisks := xl.getDisks() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(GlobalContext) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err := obj.MakeBucket(ctx, bucket, MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to make a bucket %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
 | 
					
						
							|  |  |  | 	object1 := "object1" | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	globalStorageClass.Update(storageclass.Config{ | 
					
						
							| 
									
										
										
										
											2022-06-28 11:22:18 +08:00
										 |  |  | 		RRS: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 2, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 		Standard: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 4, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "", false, false) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parts1SC := globalStorageClass | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
 | 
					
						
							|  |  |  | 	object2 := "object2" | 
					
						
							|  |  |  | 	metadata2 := make(map[string]string) | 
					
						
							|  |  |  | 	metadata2["x-amz-storage-class"] = storageclass.RRS | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "", false, false) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parts2SC := globalStorageClass | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
 | 
					
						
							|  |  |  | 	object3 := "object3" | 
					
						
							|  |  |  | 	metadata3 := make(map[string]string) | 
					
						
							|  |  |  | 	metadata3["x-amz-storage-class"] = storageclass.STANDARD | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "", false, false) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parts3SC := globalStorageClass | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
 | 
					
						
							|  |  |  | 	object4 := "object4" | 
					
						
							|  |  |  | 	metadata4 := make(map[string]string) | 
					
						
							|  |  |  | 	metadata4["x-amz-storage-class"] = storageclass.STANDARD | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	globalStorageClass.Update(storageclass.Config{ | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 		Standard: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 6, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "", false, false) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parts4SC := storageclass.Config{ | 
					
						
							|  |  |  | 		Standard: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 6, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
 | 
					
						
							|  |  |  | 	// Reset global storage class flags
 | 
					
						
							|  |  |  | 	object5 := "object5" | 
					
						
							|  |  |  | 	metadata5 := make(map[string]string) | 
					
						
							|  |  |  | 	metadata5["x-amz-storage-class"] = storageclass.RRS | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	globalStorageClass.Update(storageclass.Config{ | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 		RRS: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 2, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "", false, false) | 
					
						
							| 
									
										
										
										
											2022-06-28 11:22:18 +08:00
										 |  |  | 	parts5SC := globalStorageClass | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
 | 
					
						
							|  |  |  | 	object6 := "object6" | 
					
						
							|  |  |  | 	metadata6 := make(map[string]string) | 
					
						
							|  |  |  | 	metadata6["x-amz-storage-class"] = storageclass.STANDARD | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	globalStorageClass.Update(storageclass.Config{ | 
					
						
							| 
									
										
										
										
											2022-06-28 11:22:18 +08:00
										 |  |  | 		Standard: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 4, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 		RRS: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 2, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "", false, false) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parts6SC := storageclass.Config{ | 
					
						
							|  |  |  | 		RRS: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 2, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class
 | 
					
						
							|  |  |  | 	// Reset global storage class flags
 | 
					
						
							|  |  |  | 	object7 := "object7" | 
					
						
							|  |  |  | 	metadata7 := make(map[string]string) | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | 	metadata7["x-amz-storage-class"] = storageclass.STANDARD | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	globalStorageClass.Update(storageclass.Config{ | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 		Standard: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 5, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	_, err = obj.PutObject(ctx, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Failed to putObject %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-11-21 13:33:47 +08:00
										 |  |  | 	parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "", false, false) | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 	parts7SC := storageclass.Config{ | 
					
						
							|  |  |  | 		Standard: storageclass.StorageClass{ | 
					
						
							|  |  |  | 			Parity: 5, | 
					
						
							|  |  |  | 		}, | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	tests := []struct { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		parts               []FileInfo | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 		errs                []error | 
					
						
							|  |  |  | 		expectedReadQuorum  int | 
					
						
							|  |  |  | 		expectedWriteQuorum int | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 		storageClassCfg     storageclass.Config | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 		expectedError       error | 
					
						
							|  |  |  | 	}{ | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | 		{parts1, errs1, 12, 12, parts1SC, nil}, | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 		{parts2, errs2, 14, 14, parts2SC, nil}, | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | 		{parts3, errs3, 12, 12, parts3SC, nil}, | 
					
						
							| 
									
										
										
										
											2020-08-04 03:15:08 +08:00
										 |  |  | 		{parts4, errs4, 10, 10, parts4SC, nil}, | 
					
						
							|  |  |  | 		{parts5, errs5, 14, 14, parts5SC, nil}, | 
					
						
							| 
									
										
										
										
											2021-01-17 04:08:02 +08:00
										 |  |  | 		{parts6, errs6, 12, 12, parts6SC, nil}, | 
					
						
							|  |  |  | 		{parts7, errs7, 11, 11, parts7SC, nil}, | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	for _, tt := range tests { | 
					
						
							|  |  |  | 		tt := tt | 
					
						
							|  |  |  | 		t.(*testing.T).Run("", func(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2023-05-05 05:44:30 +08:00
										 |  |  | 			globalStorageClass.Update(tt.storageClassCfg) | 
					
						
							| 
									
										
										
										
											2022-06-28 11:22:18 +08:00
										 |  |  | 			actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks))) | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 			if tt.expectedError != nil && err == nil { | 
					
						
							|  |  |  | 				t.Errorf("Expected %s, got %s", tt.expectedError, err) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if tt.expectedError == nil && err != nil { | 
					
						
							|  |  |  | 				t.Errorf("Expected %s, got %s", tt.expectedError, err) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if tt.expectedReadQuorum != actualReadQuorum { | 
					
						
							|  |  |  | 				t.Errorf("Expected Read Quorum %d, got %d", tt.expectedReadQuorum, actualReadQuorum) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if tt.expectedWriteQuorum != actualWriteQuorum { | 
					
						
							|  |  |  | 				t.Errorf("Expected Write Quorum %d, got %d", tt.expectedWriteQuorum, actualWriteQuorum) | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		}) | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // In some deployments, one object has data inlined in one disk and not inlined in other disks.
 | 
					
						
							|  |  |  | func TestGetObjectInlineNotInline(t *testing.T) { | 
					
						
							|  |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create a backend with 4 disks named disk{1...4}, this name convention
 | 
					
						
							|  |  |  | 	// because we will unzip some object data from a sample archive.
 | 
					
						
							|  |  |  | 	const numDisks = 4 | 
					
						
							| 
									
										
										
										
											2022-07-26 03:37:26 +08:00
										 |  |  | 	path := t.TempDir() | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	var fsDirs []string | 
					
						
							|  |  |  | 	for i := 1; i <= numDisks; i++ { | 
					
						
							|  |  |  | 		fsDirs = append(fsDirs, filepath.Join(path, fmt.Sprintf("disk%d", i))) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-07-04 00:47:40 +08:00
										 |  |  | 	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(0, fsDirs...)) | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		removeRoots(fsDirs) | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// cleaning up of temporary test directories
 | 
					
						
							|  |  |  | 	defer objLayer.Shutdown(context.Background()) | 
					
						
							|  |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create a testbucket
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 	err = objLayer.MakeBucket(ctx, "testbucket", MakeBucketOptions{}) | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Unzip sample object data to the existing disks
 | 
					
						
							|  |  |  | 	err = unzipArchive("testdata/xl-meta-inline-notinline.zip", path) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Try to read the object and check its md5sum
 | 
					
						
							| 
									
										
										
										
											2023-04-18 03:16:37 +08:00
										 |  |  | 	gr, err := objLayer.GetObjectNInfo(ctx, "testbucket", "file", nil, nil, ObjectOptions{}) | 
					
						
							| 
									
										
										
										
											2022-05-24 21:26:38 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Expected GetObject to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	h := md5.New() | 
					
						
							|  |  |  | 	_, err = io.Copy(h, gr) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatalf("Expected GetObject reading data to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	gr.Close() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	const expectedHash = "fffb6377948ebea75ad2b8058e849ef5" | 
					
						
							|  |  |  | 	foundHash := fmt.Sprintf("%x", h.Sum(nil)) | 
					
						
							|  |  |  | 	if foundHash != expectedHash { | 
					
						
							|  |  |  | 		t.Fatalf("Expected data to have md5sum = `%s`, found `%s`", expectedHash, foundHash) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2022-05-31 08:52:59 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // Test reading an object with some outdated data in some disks
 | 
					
						
							|  |  |  | func TestGetObjectWithOutdatedDisks(t *testing.T) { | 
					
						
							| 
									
										
										
										
											2022-11-07 16:11:21 +08:00
										 |  |  | 	if runtime.GOOS == globalWindowsOSName { | 
					
						
							|  |  |  | 		t.Skip() | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-05-31 08:52:59 +08:00
										 |  |  | 	ctx, cancel := context.WithCancel(context.Background()) | 
					
						
							|  |  |  | 	defer cancel() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Create an instance of xl backend.
 | 
					
						
							|  |  |  | 	obj, fsDirs, err := prepareErasure(ctx, 6) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		t.Fatal(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Cleanup backend directories.
 | 
					
						
							|  |  |  | 	defer obj.Shutdown(context.Background()) | 
					
						
							|  |  |  | 	defer removeRoots(fsDirs) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	z := obj.(*erasureServerPools) | 
					
						
							|  |  |  | 	sets := z.serverPools[0] | 
					
						
							|  |  |  | 	xl := sets.sets[0] | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	origErasureDisks := xl.getDisks() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	testCases := []struct { | 
					
						
							|  |  |  | 		bucket    string | 
					
						
							|  |  |  | 		versioned bool | 
					
						
							|  |  |  | 		object    string | 
					
						
							|  |  |  | 		content   []byte | 
					
						
							|  |  |  | 	}{ | 
					
						
							|  |  |  | 		{"bucket1", false, "object1", []byte("aaaaaaaaaaaaaaaa")}, | 
					
						
							|  |  |  | 		{"bucket2", false, "object2", bytes.Repeat([]byte{'a'}, smallFileThreshold*2)}, | 
					
						
							|  |  |  | 		{"bucket3", true, "version1", []byte("aaaaaaaaaaaaaaaa")}, | 
					
						
							|  |  |  | 		{"bucket4", true, "version2", bytes.Repeat([]byte{'a'}, smallFileThreshold*2)}, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for i, testCase := range testCases { | 
					
						
							|  |  |  | 		// Step 1: create a bucket
 | 
					
						
							| 
									
										
										
										
											2022-12-23 23:46:00 +08:00
										 |  |  | 		err = z.MakeBucket(ctx, testCase.bucket, MakeBucketOptions{VersioningEnabled: testCase.versioned}) | 
					
						
							| 
									
										
										
										
											2022-05-31 08:52:59 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatalf("Test %d: Failed to create a bucket: %v", i+1, err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		// Step 2: Upload an object with a random content
 | 
					
						
							|  |  |  | 		initialData := bytes.Repeat([]byte{'b'}, len(testCase.content)) | 
					
						
							|  |  |  | 		sets.erasureDisksMu.Lock() | 
					
						
							|  |  |  | 		xl.getDisks = func() []StorageAPI { return origErasureDisks } | 
					
						
							|  |  |  | 		sets.erasureDisksMu.Unlock() | 
					
						
							|  |  |  | 		_, err = z.PutObject(ctx, testCase.bucket, testCase.object, mustGetPutObjReader(t, bytes.NewReader(initialData), int64(len(initialData)), "", ""), | 
					
						
							|  |  |  | 			ObjectOptions{Versioned: testCase.versioned}) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatalf("Test %d: Failed to upload a random object: %v", i+1, err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		// Step 3: Upload the object with some disks offline
 | 
					
						
							|  |  |  | 		sets.erasureDisksMu.Lock() | 
					
						
							|  |  |  | 		xl.getDisks = func() []StorageAPI { | 
					
						
							|  |  |  | 			disks := make([]StorageAPI, len(origErasureDisks)) | 
					
						
							|  |  |  | 			copy(disks, origErasureDisks) | 
					
						
							|  |  |  | 			disks[0] = nil | 
					
						
							|  |  |  | 			disks[1] = nil | 
					
						
							|  |  |  | 			return disks | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		sets.erasureDisksMu.Unlock() | 
					
						
							| 
									
										
										
										
											2022-09-14 23:17:39 +08:00
										 |  |  | 		got, err := z.PutObject(ctx, testCase.bucket, testCase.object, mustGetPutObjReader(t, bytes.NewReader(testCase.content), int64(len(testCase.content)), "", ""), | 
					
						
							| 
									
										
										
										
											2022-05-31 08:52:59 +08:00
										 |  |  | 			ObjectOptions{Versioned: testCase.versioned}) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatalf("Test %d: Failed to upload the final object: %v", i+1, err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		// Step 4: Try to read the object back and check its md5sum
 | 
					
						
							|  |  |  | 		sets.erasureDisksMu.Lock() | 
					
						
							|  |  |  | 		xl.getDisks = func() []StorageAPI { return origErasureDisks } | 
					
						
							|  |  |  | 		sets.erasureDisksMu.Unlock() | 
					
						
							| 
									
										
										
										
											2023-04-18 03:16:37 +08:00
										 |  |  | 		gr, err := z.GetObjectNInfo(ctx, testCase.bucket, testCase.object, nil, nil, ObjectOptions{VersionID: got.VersionID}) | 
					
						
							| 
									
										
										
										
											2022-05-31 08:52:59 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatalf("Expected GetObject to succeed, but failed with %v", err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		h := md5.New() | 
					
						
							|  |  |  | 		h.Write(testCase.content) | 
					
						
							|  |  |  | 		expectedHash := h.Sum(nil) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		h.Reset() | 
					
						
							|  |  |  | 		_, err = io.Copy(h, gr) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			t.Fatalf("Test %d: Failed to calculate md5sum of the object: %v", i+1, err) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		gr.Close() | 
					
						
							|  |  |  | 		foundHash := h.Sum(nil) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		if !bytes.Equal(foundHash, expectedHash) { | 
					
						
							|  |  |  | 			t.Fatalf("Test %d: Expected data to have md5sum = `%x`, found `%x`", i+1, expectedHash, foundHash) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } |