/*
 * MinIO Cloud Storage, (C) 2015-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"
	"io"
	"math/rand"
	"net"
	"net/http"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	snappy "github.com/golang/snappy"
	"github.com/minio/minio-go/v6/pkg/s3utils"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/ioutil"
	"github.com/minio/minio/pkg/wildcard"
	"github.com/skyrings/skyring-common/tools/uuid"
)

const (
	// MinIO meta bucket.
	minioMetaBucket = ".minio.sys"
	// Multipart meta prefix.
	mpartMetaPrefix = "multipart"
	// MinIO Multipart meta prefix.
	minioMetaMultipartBucket = minioMetaBucket + "/" + mpartMetaPrefix
	// MinIO Tmp meta prefix.
	minioMetaTmpBucket = minioMetaBucket + "/tmp"
	// DNS separator (period), used for bucket name validation.
	dnsDelimiter = "."
)

// isMinioMetaBucketName returns true if given bucket is a MinIO internal
// bucket and false otherwise.
func isMinioMetaBucketName(bucket string) bool {
	return bucket == minioMetaBucket ||
		bucket == minioMetaMultipartBucket ||
		bucket == minioMetaTmpBucket
}

// IsValidBucketName verifies that a bucket name is in accordance with
// Amazon's requirements (i.e. DNS naming conventions). It must be 3-63
// characters long, and it must be a sequence of one or more labels
// separated by periods. Each label can contain lowercase ascii
// letters, decimal digits and hyphens, but must not begin or end with
// a hyphen. See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
	// Special case when bucket is equal to one of the meta buckets.
	if isMinioMetaBucketName(bucket) {
		return true
	}
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
	}

	// Split on dot and check each piece conforms to rules.
	allNumbers := true
	pieces := strings.Split(bucket, dnsDelimiter)
	for _, piece := range pieces {
		if len(piece) == 0 || piece[0] == '-' ||
			piece[len(piece)-1] == '-' {
			// Current piece has 0-length or starts or
			// ends with a hyphen.
			return false
		}
		// Now only need to check if each piece is a valid
		// 'label' in AWS terminology and if the bucket looks
		// like an IP address.
		isNotNumber := false
		for i := 0; i < len(piece); i++ {
			switch {
			case (piece[i] >= 'a' && piece[i] <= 'z' ||
				piece[i] == '-'):
				// Found a non-digit character, so
				// this piece is not a number.
				isNotNumber = true
			case piece[i] >= '0' && piece[i] <= '9':
				// Nothing to do.
			default:
				// Found invalid character.
				return false
			}
		}
		allNumbers = allNumbers && !isNotNumber
	}
	// Does the bucket name look like an IP address?
	return !(len(pieces) == 4 && allNumbers)
}
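
// For illustration, based on the checks above:
//
//	IsValidBucketName("my-bucket-01") // true: 3-63 chars, valid label characters
//	IsValidBucketName("My_Bucket")    // false: uppercase and '_' are rejected
//	IsValidBucketName("-bucket")      // false: label begins with a hyphen
//	IsValidBucketName("192.168.1.10") // false: looks like an IP address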
					
						
// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF8
// string.
//
// See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
//
// You should avoid the following characters in a key name because of
// significant special handling for consistency across all
// applications.
//
// Rejects strings with the following characters.
//
// - Backslash ("\")
//
// Additionally, MinIO does not support object names with a trailing "/".
func IsValidObjectName(object string) bool {
	if len(object) == 0 {
		return false
	}
	if hasSuffix(object, slashSeparator) || hasPrefix(object, slashSeparator) {
		return false
	}
	return IsValidObjectPrefix(object)
}

// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It is valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
	if hasBadPathComponent(object) {
		return false
	}
	if len(object) > 1024 {
		return false
	}
	if !utf8.ValidString(object) {
		return false
	}
	// Reject unsupported characters in object name.
	if strings.ContainsAny(object, "\\") {
		return false
	}
	return true
}
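
// For illustration, based on the checks above (assuming hasBadPathComponent
// only rejects "." and ".." style path components):
//
//	IsValidObjectName("photos/2019/jan/beach.jpg") // true
//	IsValidObjectName("photos/")                   // false: trailing "/" is not supported
//	IsValidObjectName(`back\slash`)                // false: backslash is rejected
//	IsValidObjectPrefix("")                        // true: an empty prefix is allowed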
					
						
// Slash separator.
const slashSeparator = "/"

// retainSlash - retains slash from a path.
func retainSlash(s string) string {
	return strings.TrimSuffix(s, slashSeparator) + slashSeparator
}

// pathJoin - like path.Join() but retains trailing "/" of the last element
func pathJoin(elem ...string) string {
	trailingSlash := ""
	if len(elem) > 0 {
		if hasSuffix(elem[len(elem)-1], slashSeparator) {
			trailingSlash = "/"
		}
	}
	return path.Join(elem...) + trailingSlash
}
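
// For illustration of the difference from the standard library:
//
//	path.Join("bucket", "prefix/") // "bucket/prefix"  (trailing slash dropped)
//	pathJoin("bucket", "prefix/")  // "bucket/prefix/" (trailing slash retained)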
					
						
// mustGetUUID - get a random UUID.
func mustGetUUID() string {
	uuid, err := uuid.New()
	if err != nil {
		logger.CriticalIf(context.Background(), err)
	}

	return uuid.String()
}

// Create an s3 compatible MD5sum for complete multipart transaction.
func getCompleteMultipartMD5(parts []CompletePart) string {
	var finalMD5Bytes []byte
	for _, part := range parts {
		md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
		if err != nil {
			finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
		} else {
			finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
		}
	}
	s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
	return s3MD5
}
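
// Worked example: for a two-part upload the per-part MD5s are hex-decoded,
// concatenated and hashed again with MD5, then suffixed with the part count,
// producing an ETag in the S3 multipart form "<md5-of-concatenated-md5s>-2".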
					
						
// Clean unwanted fields from metadata
func cleanMetadata(metadata map[string]string) map[string]string {
	// Remove STANDARD StorageClass
	metadata = removeStandardStorageClass(metadata)
	// Clean meta etag keys 'md5Sum', 'etag', "expires".
	return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires")
}

// Filter X-Amz-Storage-Class field only if it is set to STANDARD.
// This is done since AWS S3 doesn't return STANDARD Storage class as response header.
func removeStandardStorageClass(metadata map[string]string) map[string]string {
	if metadata[amzStorageClass] == standardStorageClass {
		delete(metadata, amzStorageClass)
	}
	return metadata
}

// cleanMetadataKeys takes keyNames to be filtered
// and returns a new map with all the entries with keyNames removed.
func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[string]string {
	var newMeta = make(map[string]string)
	for k, v := range metadata {
		if contains(keyNames, k) {
			continue
		}
		newMeta[k] = v
	}
	return newMeta
}
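
// For illustration:
//
//	cleanMetadataKeys(map[string]string{"etag": "e", "X-Amz-Meta-App": "a"}, "etag")
//	// => map[string]string{"X-Amz-Meta-App": "a"}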
					
						
// Extracts etag value from the metadata.
func extractETag(metadata map[string]string) string {
	// md5Sum tag is kept for backward compatibility.
	etag, ok := metadata["md5Sum"]
	if !ok {
		etag = metadata["etag"]
	}
	// Success.
	return etag
}

// Prefix matcher string matches prefix in a platform specific way.
// For example on windows since it's case insensitive we are supposed
// to do case insensitive checks.
func hasPrefix(s string, prefix string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
	}
	return strings.HasPrefix(s, prefix)
}

// Suffix matcher string matches suffix in a platform specific way.
// For example on windows since it's case insensitive we are supposed
// to do case insensitive checks.
func hasSuffix(s string, suffix string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
	}
	return strings.HasSuffix(s, suffix)
}

// Validates if two strings are equal.
func isStringEqual(s1 string, s2 string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.EqualFold(s1, s2)
	}
	return s1 == s2
}
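
// For illustration of the platform-specific behaviour:
//
//	hasPrefix("Bucket/Object", "bucket/") // true on Windows, false elsewhere
//	isStringEqual("FOO", "foo")           // true on Windows, false elsewhere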
					
						
// Ignores all reserved bucket names or invalid bucket names.
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
	bucketEntry = strings.TrimSuffix(bucketEntry, slashSeparator)
	if strict {
		if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
			return true
		}
	} else {
		if err := s3utils.CheckValidBucketName(bucketEntry); err != nil {
			return true
		}
	}
	return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry)
}

// Returns true if input bucket is a reserved minio meta bucket '.minio.sys'.
func isMinioMetaBucket(bucketName string) bool {
	return bucketName == minioMetaBucket
}

// Returns true if input bucket is a reserved minio bucket 'minio'.
func isMinioReservedBucket(bucketName string) bool {
	return bucketName == minioReservedBucket
}

// returns a slice of hosts by reading a slice of DNS records
func getHostsSlice(records []dns.SrvRecord) []string {
	var hosts []string
	for _, r := range records {
		hosts = append(hosts, r.Host)
	}
	return hosts
}

// returns a host (and corresponding port) from a slice of DNS records
func getHostFromSrv(records []dns.SrvRecord) string {
	rand.Seed(time.Now().Unix())
	srvRecord := records[rand.Intn(len(records))]
	return net.JoinHostPort(srvRecord.Host, fmt.Sprintf("%d", srvRecord.Port))
}

// IsCompressed returns true if the object is marked as compressed.
func (o ObjectInfo) IsCompressed() bool {
	_, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
	return ok
}

// GetActualSize - read the decompressed size from the meta json.
func (o ObjectInfo) GetActualSize() int64 {
	metadata := o.UserDefined
	sizeStr, ok := metadata[ReservedMetadataPrefix+"actual-size"]
	if ok {
		size, err := strconv.ParseInt(sizeStr, 10, 64)
		if err == nil {
			return size
		}
	}
	return -1
}

// Disable compression for encrypted requests.
// Using compression and encryption together leaves room for side channel attacks.
// Eliminate non-compressible objects by extensions/content-types.
func isCompressible(header http.Header, object string) bool {
	if hasServerSideEncryptionHeader(header) || excludeForCompression(header, object) {
		return false
	}
	return true
}

// Eliminate the non-compressible objects.
func excludeForCompression(header http.Header, object string) bool {
	objStr := object
	contentType := header.Get("Content-Type")
	if globalIsCompressionEnabled {
		// We strictly disable compression for standard extensions/content-types (`compressed`).
		if hasStringSuffixInSlice(objStr, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, contentType) {
			return true
		}
		// Filter compression includes.
		if len(globalCompressExtensions) > 0 || len(globalCompressMimeTypes) > 0 {
			extensions := globalCompressExtensions
			mimeTypes := globalCompressMimeTypes
			if hasStringSuffixInSlice(objStr, extensions) || hasPattern(mimeTypes, contentType) {
				return false
			}
			return true
		}
		return false
	}
	return true
}
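
// In short: with compression enabled, already-compressed extensions and
// content-types are always excluded; if include lists are configured an
// object is compressed only when it matches one of them; with no include
// lists configured, every remaining object is eligible for compression.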
					
						
// Utility which returns true if the string ends with any suffix in the list.
func hasStringSuffixInSlice(str string, list []string) bool {
	for _, v := range list {
		if strings.HasSuffix(str, v) {
			return true
		}
	}
	return false
}

// Returns true if any of the given wildcard patterns match the matchStr.
func hasPattern(patterns []string, matchStr string) bool {
	for _, pattern := range patterns {
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}

// Returns the part file name which matches the partNumber and etag.
func getPartFile(entries []string, partNumber int, etag string) string {
	for _, entry := range entries {
		if strings.HasPrefix(entry, fmt.Sprintf("%.5d.%s.", partNumber, etag)) {
			return entry
		}
	}
	return ""
}
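
// For illustration (the exact suffix after the etag is an assumption here):
// an entry like "00003.<etag>.<size>" would be returned by
// getPartFile(entries, 3, "<etag>"), since %.5d zero-pads the part number.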
					
						
// Returns the compressed offset which should be skipped.
func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (int64, int64) {
	var compressedOffset int64
	var skipLength int64
	var cumulativeActualSize int64
	if len(objectInfo.Parts) > 0 {
		for _, part := range objectInfo.Parts {
			cumulativeActualSize += part.ActualSize
			if cumulativeActualSize <= offset {
				compressedOffset += part.Size
			} else {
				skipLength = cumulativeActualSize - part.ActualSize
				break
			}
		}
	}
	return compressedOffset, offset - skipLength
}
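
// Worked example: with two parts of ActualSize 100 each, offset 150 skips all
// of part 1, so the compressed offset equals part 1's compressed Size and the
// remaining skip inside part 2 is 150 - 100 = 50.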
					
						
// byBucketName is a collection satisfying sort.Interface.
type byBucketName []BucketInfo

func (d byBucketName) Len() int           { return len(d) }
func (d byBucketName) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }
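
// For illustration, from a caller that imports "sort":
//
//	sort.Sort(byBucketName(buckets)) // orders BucketInfo entries by name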
					
						
// GetObjectReader is a type that wraps a reader with a lock to
// provide a ReadCloser interface that unlocks on Close()
type GetObjectReader struct {
	ObjInfo ObjectInfo
	pReader io.Reader

	cleanUpFns []func()
	precondFn  func(ObjectInfo, string) bool
	once       sync.Once
}

// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (*GetObjectReader, error) {
	if pcfn != nil {
		if ok := pcfn(oi, ""); ok {
			// Call the cleanup funcs
			for i := len(cleanupFns) - 1; i >= 0; i-- {
				cleanupFns[i]()
			}
			return nil, PreConditionFailed{}
		}
	}
	return &GetObjectReader{
		ObjInfo:    oi,
		pReader:    r,
		cleanUpFns: cleanupFns,
		precondFn:  pcfn,
	}, nil
}

// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)

// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse order as passed here. NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanUpFns ...func()) (
	fn ObjReaderFn, off, length int64, err error) {

	// Call the clean-up functions immediately in case of exit
	// with error
	defer func() {
		if err != nil {
			for i := len(cleanUpFns) - 1; i >= 0; i-- {
				cleanUpFns[i]()
			}
		}
	}()

	isEncrypted := crypto.IsEncrypted(oi.UserDefined)
	isCompressed := oi.IsCompressed()
	var skipLen int64
	// Calculate range to read (different for
	// e.g. encrypted/compressed objects)
	switch {
	case isEncrypted:
		var seqNumber uint32
		var partStart int
		off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
		if err != nil {
			return nil, 0, 0, err
		}
		var decSize int64
		decSize, err = oi.DecryptedSize()
		if err != nil {
			return nil, 0, 0, err
		}
		var decRangeLength int64
		decRangeLength, err = rs.GetLength(decSize)
		if err != nil {
			return nil, 0, 0, err
		}

		// We define a closure that performs decryption given
		// a reader that returns the desired range of
		// encrypted bytes. The header parameter is used to
		// provide encryption parameters.
		fn = func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
			copySource := h.Get(crypto.SSECopyAlgorithm) != ""

			cFns = append(cleanUpFns, cFns...)
			// Attach decrypter on inputReader
			var decReader io.Reader
			decReader, err = DecryptBlocksRequestR(inputReader, h,
				off, length, seqNumber, partStart, oi, copySource)
			if err != nil {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, err
			}
			encETag := oi.ETag
			oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.

			if pcfn != nil {
				if ok := pcfn(oi, encETag); ok {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, PreConditionFailed{}
				}
			}

			// Apply the skipLen and limit on the
			// decrypted stream
			decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    decReader,
				cleanUpFns: cFns,
				precondFn:  pcfn,
			}
			return r, nil
		}
	case isCompressed:
		// Read the decompressed size from the meta.json.
		actualSize := oi.GetActualSize()
		if actualSize < 0 {
			return nil, 0, 0, errInvalidDecompressedSize
		}
		off, length = int64(0), oi.Size
		decOff, decLength := int64(0), actualSize
		if rs != nil {
			off, length, err = rs.GetOffsetLength(actualSize)
			if err != nil {
				return nil, 0, 0, err
			}
			// Incase of range based queries on multiparts, the offset and length are reduced.
			off, decOff = getCompressedOffsets(oi, off)
			decLength = length
			length = oi.Size - off

			// For negative length we read everything.
			if decLength < 0 {
				decLength = actualSize - decOff
			}

			// Reply back invalid range if the input offset and length fall out of range.
			if decOff > actualSize || decOff+decLength > actualSize {
				return nil, 0, 0, errInvalidRange
			}
		}
		fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
			cFns = append(cleanUpFns, cFns...)
			if pcfn != nil {
				if ok := pcfn(oi, ""); ok {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, PreConditionFailed{}
				}
			}
			// Decompression reader.
			snappyReader := snappy.NewReader(inputReader)
			// Apply the skipLen and limit on the
			// decompressed stream
			decReader := io.LimitReader(ioutil.NewSkipReader(snappyReader, decOff), decLength)
			oi.Size = decLength

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    decReader,
				cleanUpFns: cFns,
				precondFn:  pcfn,
			}
			return r, nil
		}

	default:
		off, length, err = rs.GetOffsetLength(oi.Size)
		if err != nil {
			return nil, 0, 0, err
		}
		fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
			cFns = append(cleanUpFns, cFns...)
			if pcfn != nil {
				if ok := pcfn(oi, ""); ok {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, PreConditionFailed{}
				}
			}
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    inputReader,
				cleanUpFns: cFns,
				precondFn:  pcfn,
			}
			return r, nil
		}
	}
	return fn, off, length, nil
}
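
// Usage sketch for a hypothetical caller (names here are illustrative):
// the backend first asks which raw byte range must be read, reads exactly
// that range, and then wraps the raw stream so it is decrypted/decompressed
// and trimmed to the client's requested range:
//
//	fn, off, length, err := NewGetObjectReader(rs, objInfo, pcfn)
//	// ... read `length` bytes starting at `off` into rawReader ...
//	gr, err := fn(rawReader, reqHeader, pcfn, closeBackend)
//	defer gr.Close()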
					
						
// Close - calls the cleanup actions in reverse order
func (g *GetObjectReader) Close() error {
	// sync.Once is used here to ensure that Close() is
	// idempotent.
	g.once.Do(func() {
		for i := len(g.cleanUpFns) - 1; i >= 0; i-- {
			g.cleanUpFns[i]()
		}
	})
	return nil
}

// Read - to implement Reader interface.
func (g *GetObjectReader) Read(p []byte) (n int, err error) {
	n, err = g.pReader.Read(p)
	if err != nil {
		// Calling code may not Close() in case of error, so
		// we ensure it.
		g.Close()
	}
	return
}

// SealMD5CurrFn seals md5sum with object encryption key and returns sealed
// md5sum
type SealMD5CurrFn func([]byte) []byte

// PutObjReader is a type that wraps sio.EncryptReader and
// underlying hash.Reader in a struct
type PutObjReader struct {
	*hash.Reader              // actual data stream
	rawReader    *hash.Reader // original data stream
	sealMD5Fn    SealMD5CurrFn
}

// Size returns the absolute number of bytes the Reader
// will return during reading. It returns -1 for unlimited
// data.
func (p *PutObjReader) Size() int64 {
	return p.Reader.Size()
}

// MD5CurrentHexString returns the current MD5Sum or encrypted MD5Sum
// as a hex encoded string
func (p *PutObjReader) MD5CurrentHexString() string {
	md5sumCurr := p.rawReader.MD5Current()
	var appendHyphen bool
	// md5sumCurr is not empty in two scenarios:
	// - server is running in strict compatibility mode
	// - client set Content-Md5 during PUT operation
	if len(md5sumCurr) == 0 {
		// md5sumCurr is only empty when we are running
		// in non-compatibility mode.
		md5sumCurr = make([]byte, 16)
		rand.Read(md5sumCurr)
		appendHyphen = true
	}
	if p.sealMD5Fn != nil {
		md5sumCurr = p.sealMD5Fn(md5sumCurr)
	}
	if appendHyphen {
		// Make sure to return an ETag string of exactly 32 hex characters;
		// for SSE requests the sealed ETag may be longer, and the code
		// decrypting the ETag ignores ETags in multipart form, i.e. <hex>-N.
		return hex.EncodeToString(md5sumCurr)[:32] + "-1"
	}
	return hex.EncodeToString(md5sumCurr)
}
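
// For illustration (added note, assuming the behaviour described above): in
// strict compatibility mode the result is the plain 32-character hex MD5,
// e.g. "5d41402abc4b2a76b9719d911017c592", whereas the randomized,
// non-compatibility case yields a multipart-style ETag such as
// "5d41402abc4b2a76b9719d911017c592-1".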
					
						

// NewPutObjReader returns a new PutObjReader. It holds references to the
// underlying data stream from the client and to the encrypted data reader.
func NewPutObjReader(rawReader *hash.Reader, encReader *hash.Reader, encKey []byte) *PutObjReader {
	p := PutObjReader{Reader: rawReader, rawReader: rawReader}

	if len(encKey) != 0 && encReader != nil {
		var objKey crypto.ObjectKey
		copy(objKey[:], encKey)
		p.sealMD5Fn = sealETagFn(objKey)
		p.Reader = encReader
	}

	return &p
}
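
// Illustrative note (not from the original source): for an unencrypted upload
// the caller would pass a nil encrypted reader and an empty key, e.g.
// NewPutObjReader(rawReader, nil, nil), leaving p.Reader pointing at the raw
// client stream and the MD5 sum unsealed; for SSE the encrypted reader and
// the 32-byte object key are supplied so that ETags are sealed before storage.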
					
						

// sealETag seals the given MD5 sum with the object encryption key; a zero key
// leaves the MD5 sum untouched.
func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
	var emptyKey [32]byte
	if bytes.Equal(encKey[:], emptyKey[:]) {
		return md5CurrSum
	}
	return encKey.SealETag(md5CurrSum)
}

// sealETagFn returns a SealMD5CurrFn bound to the given object encryption key.
func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
	fn := func(md5sumcurr []byte) []byte {
		return sealETag(key, md5sumcurr)
	}
	return fn
}
 | 
					
						
// CleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal
// encryption metadata that was sent by minio gateway
func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string {
	var newMeta = make(map[string]string, len(metadata))
	for k, v := range metadata {
		if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") {
			newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v
		} else {
			newMeta[k] = v
		}
	}
	return newMeta
}
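
// Illustrative example (added note, the key name is an assumption): a gateway
// supplied entry such as "X-Amz-Meta-X-Minio-Internal-Server-Side-Encryption-Iv"
// is rewritten to "X-Minio-Internal-Server-Side-Encryption-Iv", while ordinary
// user metadata like "X-Amz-Meta-Color" passes through unchanged.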
					
						
// snappyCompressReader compresses data as it reads
// from the underlying io.Reader.
type snappyCompressReader struct {
	r      io.Reader
	w      *snappy.Writer
	closed bool
	buf    bytes.Buffer
}

// newSnappyCompressReader wraps r so that reads return snappy-compressed data.
func newSnappyCompressReader(r io.Reader) *snappyCompressReader {
	cr := &snappyCompressReader{r: r}
	cr.w = snappy.NewBufferedWriter(&cr.buf)
	return cr
}

// Read implements io.Reader: it reads raw bytes from the wrapped reader,
// feeds them through the snappy writer and returns the compressed output.
func (cr *snappyCompressReader) Read(p []byte) (int, error) {
	if cr.closed {
		// If the snappy writer is closed, r has been completely read;
		// return any remaining data in buf.
		return cr.buf.Read(p)
	}

	// Read from the original reader, using p as a scratch buffer.
	nr, readErr := cr.r.Read(p)

	// Write the bytes just read to the snappy writer.
	nw, err := cr.w.Write(p[:nr])
	if err != nil {
		return 0, err
	}
	if nw != nr {
		return 0, io.ErrShortWrite
	}

	// If this was the last of the data from the reader, close the snappy
	// writer to flush its internal buffer.
	if readErr == io.EOF {
		err := cr.w.Close()
		cr.closed = true
		if err != nil {
			return 0, err
		}
	}

	// Read compressed bytes out of buf.
	n, err := cr.buf.Read(p)
	if readErr != io.EOF && (err == nil || err == io.EOF) {
		err = readErr
	}
	return n, err
}
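
// What follows is an illustrative sketch, not part of the original file: it
// shows how newSnappyCompressReader might be used to compress an in-memory
// payload as it is read. The function name and the use of io.Copy are
// assumptions for the example; bytes and io are already imported by this file.
func exampleSnappyCompress(payload []byte) ([]byte, error) {
	cr := newSnappyCompressReader(bytes.NewReader(payload))
	var compressed bytes.Buffer
	// io.Copy drives cr.Read, which writes snappy-framed data into the
	// internal buffer and hands it back to the caller.
	if _, err := io.Copy(&compressed, cr); err != nil {
		return nil, err
	}
	return compressed.Bytes(), nil
}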