/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"os"
	"path/filepath"
	"reflect"
	"testing"
)

// TestNewMultipartUploadFaultyDisk - test NewMultipartUpload with faulty disks
func TestNewMultipartUploadFaultyDisk(t *testing.T) {
	// Prepare for tests
	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
	defer removeAll(disk)
	obj := initFSObjects(disk, t)

	fs := obj.(fsObjects)
	bucketName := "bucket"
	objectName := "object"

	if err := obj.MakeBucket(bucketName); err != nil {
		t.Fatal("Cannot create bucket, err: ", err)
	}

	// Test with faulty disk
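	// Wrap the posix storage in a naughtyDisk that fails the i-th storage API
	// call with errFaultyDisk. NewMultipartUpload should then either propagate
	// errFaultyDisk or, for i == 1, surface as BucketNotFound.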
					
						
	fsStorage := fs.storage.(*posix)
	for i := 1; i <= 5; i++ {
		// Faulty disk generates errFaultyDisk at the i-th storage API call
		fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
		if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); errorCause(err) != errFaultyDisk {
			switch i {
			case 1:
				if !isSameType(errorCause(err), BucketNotFound{}) {
					t.Fatal("Unexpected error ", err)
				}
			default:
				t.Fatal("Unexpected error ", err)
			}
		}
	}
}

// TestPutObjectPartFaultyDisk - test PutObjectPart with faulty disks
func TestPutObjectPartFaultyDisk(t *testing.T) {
	// Prepare for tests
	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
	defer removeAll(disk)
	obj := initFSObjects(disk, t)
	fs := obj.(fsObjects)
	bucketName := "bucket"
	objectName := "object"
	data := []byte("12345")
	dataLen := int64(len(data))

	if err := obj.MakeBucket(bucketName); err != nil {
		t.Fatal("Cannot create bucket, err: ", err)
	}

	uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
	if err != nil {
		t.Fatal("Unexpected error ", err)
	}

	md5Writer := md5.New()
	md5Writer.Write(data)
	md5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	sha256sum := ""

	// Test with faulty disk
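	// Wrap the posix storage in a naughtyDisk that fails the i-th storage API
	// call with errFaultyDisk. PutObjectPart should then either propagate
	// errFaultyDisk or surface the typed errors checked below (BucketNotFound
	// for i == 1, InvalidUploadID for i == 2 and 4).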
					
						
	fsStorage := fs.storage.(*posix)
	for i := 1; i <= 7; i++ {
		// Faulty disk generates errFaultyDisk at the i-th storage API call
		fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
		if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, dataLen, bytes.NewReader(data), md5Hex, sha256sum); errorCause(err) != errFaultyDisk {
			switch i {
			case 1:
				if !isSameType(errorCause(err), BucketNotFound{}) {
					t.Fatal("Unexpected error ", err)
				}
			case 2, 4:
				if !isSameType(errorCause(err), InvalidUploadID{}) {
					t.Fatal("Unexpected error ", err)
				}
			default:
				t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk))
			}
		}
	}
}

// TestCompleteMultipartUploadFaultyDisk - test CompleteMultipartUpload with faulty disks
func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
	// Prepare for tests
	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
	defer removeAll(disk)
	obj := initFSObjects(disk, t)

	fs := obj.(fsObjects)
	bucketName := "bucket"
	objectName := "object"
	data := []byte("12345")

	if err := obj.MakeBucket(bucketName); err != nil {
		t.Fatal("Cannot create bucket, err: ", err)
	}

	uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
	if err != nil {
		t.Fatal("Unexpected error ", err)
	}

	md5Writer := md5.New()
	md5Writer.Write(data)
	md5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	sha256sum := ""

	if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil {
		t.Fatal("Unexpected error ", err)
	}

	parts := []completePart{{PartNumber: 1, ETag: md5Hex}}

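	// Test with faulty disk: fail the i-th storage API call with errFaultyDisk
	// and verify that CompleteMultipartUpload either propagates errFaultyDisk or
	// surfaces BucketNotFound (i == 1) or InvalidUploadID (i == 2).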
					
						
	fsStorage := fs.storage.(*posix)
	for i := 1; i <= 3; i++ {
		// Faulty disk generates errFaultyDisk at the i-th storage API call
		fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
		if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); errorCause(err) != errFaultyDisk {
			switch i {
			case 1:
				if !isSameType(errorCause(err), BucketNotFound{}) {
					t.Fatal("Unexpected error ", err)
				}
			case 2:
				if !isSameType(errorCause(err), InvalidUploadID{}) {
					t.Fatal("Unexpected error ", err)
				}
			default:
				t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk))
			}
		}
	}
}

// TestListMultipartUploadsFaultyDisk - test ListMultipartUploads with faulty disks
func TestListMultipartUploadsFaultyDisk(t *testing.T) {
	// Prepare for tests
	disk := filepath.Join(os.TempDir(), "minio-"+nextSuffix())
	defer removeAll(disk)
	obj := initFSObjects(disk, t)
	fs := obj.(fsObjects)
	bucketName := "bucket"
	objectName := "object"
	data := []byte("12345")

	if err := obj.MakeBucket(bucketName); err != nil {
		t.Fatal("Cannot create bucket, err: ", err)
	}

	uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
	if err != nil {
		t.Fatal("Unexpected error ", err)
	}

	md5Writer := md5.New()
	md5Writer.Write(data)
	md5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	sha256sum := ""

	if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil {
		t.Fatal("Unexpected error ", err)
	}

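	// Test with faulty disk: fail the i-th storage API call with errFaultyDisk
	// and verify that ListMultipartUploads either propagates errFaultyDisk or
	// surfaces BucketNotFound (i == 1), InvalidUploadID (i == 2), or
	// errFileNotFound (i == 3).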
					
						
	fsStorage := fs.storage.(*posix)
	for i := 1; i <= 4; i++ {
		// Faulty disk generates errFaultyDisk at the i-th storage API call
		fs.storage = newNaughtyDisk(fsStorage, map[int]error{i: errFaultyDisk}, nil)
		if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); errorCause(err) != errFaultyDisk {
			switch i {
			case 1:
				if !isSameType(errorCause(err), BucketNotFound{}) {
					t.Fatal("Unexpected error ", err)
				}
			case 2:
				if !isSameType(errorCause(err), InvalidUploadID{}) {
					t.Fatal("Unexpected error ", err)
				}
			case 3:
				if errorCause(err) != errFileNotFound {
					t.Fatal("Unexpected error ", err)
				}
			default:
				t.Fatal("Unexpected error ", i, err, reflect.TypeOf(errorCause(err)), reflect.TypeOf(errFaultyDisk))
			}
		}
	}
}