| 
									
										
										
										
											2021-04-19 03:41:13 +08:00
										 |  |  | // Copyright (c) 2015-2021 MinIO, Inc.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This file is part of MinIO Object Storage stack
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is free software: you can redistribute it and/or modify
 | 
					
						
							|  |  |  | // it under the terms of the GNU Affero General Public License as published by
 | 
					
						
							|  |  |  | // the Free Software Foundation, either version 3 of the License, or
 | 
					
						
							|  |  |  | // (at your option) any later version.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is distributed in the hope that it will be useful
 | 
					
						
							|  |  |  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
					
						
							|  |  |  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
					
						
							|  |  |  | // GNU Affero General Public License for more details.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // You should have received a copy of the GNU Affero General Public License
 | 
					
						
							|  |  |  | // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
					
						
							| 
									
										
										
										
											2016-05-03 07:57:31 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-08-19 07:23:42 +08:00
										 |  |  | package cmd | 
					
						
							| 
									
										
										
										
											2016-05-03 07:57:31 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							| 
									
										
										
										
											2018-03-15 03:01:47 +08:00
										 |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	"encoding/json" | 
					
						
							| 
									
										
										
										
											2020-09-18 15:16:16 +08:00
										 |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2016-05-03 07:57:31 +08:00
										 |  |  | 	"fmt" | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	"io/ioutil" | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	"os" | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	"sort" | 
					
						
							|  |  |  | 	"strconv" | 
					
						
							| 
									
										
										
										
											2016-05-04 07:10:24 +08:00
										 |  |  | 	"strings" | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	"time" | 
					
						
							| 
									
										
										
										
											2016-12-20 11:32:55 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-09-06 06:51:27 +08:00
										 |  |  | 	jsoniter "github.com/json-iterator/go" | 
					
						
							| 
									
										
										
										
											2021-06-02 05:59:40 +08:00
										 |  |  | 	xioutil "github.com/minio/minio/internal/ioutil" | 
					
						
							|  |  |  | 	"github.com/minio/minio/internal/logger" | 
					
						
							| 
									
										
										
										
											2021-05-29 06:17:01 +08:00
										 |  |  | 	"github.com/minio/pkg/trie" | 
					
						
							| 
									
										
										
										
											2016-05-03 07:57:31 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | // Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID
 | 
					
						
							| 
									
										
										
										
											2018-02-21 04:21:12 +08:00
										 |  |  | func (fs *FSObjects) getUploadIDDir(bucket, object, uploadID string) string { | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))), uploadID) | 
					
						
							| 
									
										
										
										
											2017-01-27 04:51:12 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | // Returns EXPORT/.minio.sys/multipart/SHA256
 | 
					
						
							| 
									
										
										
										
											2018-02-21 04:21:12 +08:00
										 |  |  | func (fs *FSObjects) getMultipartSHADir(bucket, object string) string { | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object)))) | 
					
						
							| 
									
										
										
										
											2017-01-27 04:51:12 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | // Returns partNumber.etag
 | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | func (fs *FSObjects) encodePartFile(partNumber int, etag string, actualSize int64) string { | 
					
						
							|  |  |  | 	return fmt.Sprintf("%.5d.%s.%d", partNumber, etag, actualSize) | 
					
						
							| 
									
										
										
										
											2017-01-27 04:51:12 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | // Returns partNumber and etag
 | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, actualSize int64, err error) { | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	result := strings.Split(name, ".") | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | 	if len(result) != 3 { | 
					
						
							|  |  |  | 		return 0, "", 0, errUnexpected | 
					
						
							| 
									
										
										
										
											2017-09-01 02:29:22 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	partNumber, err = strconv.Atoi(result[0]) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | 		return 0, "", 0, errUnexpected | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-09-28 11:36:17 +08:00
										 |  |  | 	actualSize, err = strconv.ParseInt(result[2], 10, 64) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return 0, "", 0, errUnexpected | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return partNumber, result[1], actualSize, nil | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
// Appends parts to an appendFile sequentially.
//
// Parts already present on disk for this uploadID are concatenated, in part
// number order, onto a single per-upload temp file. The scan stops at the
// first gap in part numbers and returns; re-invoking after later parts
// arrive resumes from where it left off (tracked via file.parts).
// NOTE(review): presumably the append file is consumed when the multipart
// upload completes — confirm against the caller.
func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) {
	// appendFileMapMu guards only the map; per-file appends are serialized
	// by the file's own lock below.
	fs.appendFileMapMu.Lock()
	logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
	file := fs.appendFileMap[uploadID]
	if file == nil {
		// First call for this uploadID: allocate a unique temp path under
		// .minio.sys/tmp/<fsUUID>/ and register it.
		file = &fsAppendFile{
			filePath: pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, fmt.Sprintf("%s.%s", uploadID, mustGetUUID())),
		}
		fs.appendFileMap[uploadID] = file
	}
	fs.appendFileMapMu.Unlock()

	file.Lock()
	defer file.Unlock()

	// Since we append sequentially nextPartNumber will always be len(file.parts)+1
	nextPartNumber := len(file.parts) + 1
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return
	}
	// Part files are named "NNNNN.etag.size" (zero-padded part number, see
	// encodePartFile), so a lexical sort yields numeric part order.
	sort.Strings(entries)

	for _, entry := range entries {
		// fs.json carries upload metadata, not part data — skip it.
		if entry == fs.metaJSONFile {
			continue
		}
		partNumber, etag, actualSize, err := fs.decodePartFile(entry)
		if err != nil {
			// Skip part files whose name don't match expected format. These could be backend filesystem specific files.
			continue
		}
		if partNumber < nextPartNumber {
			// Part already appended.
			continue
		}
		if partNumber > nextPartNumber {
			// Required part number is not yet uploaded.
			return
		}

		partPath := pathJoin(uploadIDDir, entry)
		err = xioutil.AppendFile(file.filePath, partPath, globalFSOSync)
		if err != nil {
			reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath)
			reqInfo.AppendTags("filepath", file.filePath)
			logger.LogIf(ctx, err)
			return
		}

		// Record the appended part so a later invocation resumes after it.
		file.parts = append(file.parts, PartInfo{PartNumber: partNumber, ETag: etag, ActualSize: actualSize})
		nextPartNumber++
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-10-06 07:08:25 +08:00
										 |  |  | // ListMultipartUploads - lists all the uploadIDs for the specified object.
 | 
					
						
							|  |  |  | // We do not support prefix based listing.
 | 
					
						
							| 
									
										
										
										
											2018-03-15 03:01:47 +08:00
										 |  |  | func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil { | 
					
						
							|  |  |  | 		return result, toObjectErr(err) | 
					
						
							| 
									
										
										
										
											2017-08-05 01:45:57 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if _, err := fs.statBucketDir(ctx, bucket); err != nil { | 
					
						
							|  |  |  | 		return result, toObjectErr(err, bucket) | 
					
						
							| 
									
										
										
										
											2017-08-05 01:45:57 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-10-06 07:08:25 +08:00
										 |  |  | 	result.MaxUploads = maxUploads | 
					
						
							|  |  |  | 	result.KeyMarker = keyMarker | 
					
						
							|  |  |  | 	result.Prefix = object | 
					
						
							|  |  |  | 	result.Delimiter = delimiter | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	result.NextKeyMarker = object | 
					
						
							|  |  |  | 	result.UploadIDMarker = uploadIDMarker | 
					
						
							| 
									
										
										
										
											2017-10-06 07:08:25 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	uploadIDs, err := readDir(fs.getMultipartSHADir(bucket, object)) | 
					
						
							| 
									
										
										
										
											2017-10-06 07:08:25 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 		if err == errFileNotFound { | 
					
						
							|  |  |  | 			result.IsTruncated = false | 
					
						
							|  |  |  | 			return result, nil | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return result, toObjectErr(err) | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-07-23 13:36:15 +08:00
										 |  |  | 	// S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	// is the creation time of the uploadID, hence we will use that.
 | 
					
						
							|  |  |  | 	var uploads []MultipartInfo | 
					
						
							|  |  |  | 	for _, uploadID := range uploadIDs { | 
					
						
							| 
									
										
										
										
											2018-03-29 05:14:06 +08:00
										 |  |  | 		metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile) | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		fi, err := fsStatFile(ctx, metaFilePath) | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return result, toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		uploads = append(uploads, MultipartInfo{ | 
					
						
							|  |  |  | 			Object:    object, | 
					
						
							| 
									
										
										
										
											2019-08-07 03:08:58 +08:00
										 |  |  | 			UploadID:  strings.TrimSuffix(uploadID, SlashSeparator), | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 			Initiated: fi.ModTime(), | 
					
						
							|  |  |  | 		}) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	sort.Slice(uploads, func(i int, j int) bool { | 
					
						
							|  |  |  | 		return uploads[i].Initiated.Before(uploads[j].Initiated) | 
					
						
							|  |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	uploadIndex := 0 | 
					
						
							|  |  |  | 	if uploadIDMarker != "" { | 
					
						
							|  |  |  | 		for uploadIndex < len(uploads) { | 
					
						
							|  |  |  | 			if uploads[uploadIndex].UploadID != uploadIDMarker { | 
					
						
							|  |  |  | 				uploadIndex++ | 
					
						
							|  |  |  | 				continue | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if uploads[uploadIndex].UploadID == uploadIDMarker { | 
					
						
							|  |  |  | 				uploadIndex++ | 
					
						
							|  |  |  | 				break | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			uploadIndex++ | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	for uploadIndex < len(uploads) { | 
					
						
							|  |  |  | 		result.Uploads = append(result.Uploads, uploads[uploadIndex]) | 
					
						
							|  |  |  | 		result.NextUploadIDMarker = uploads[uploadIndex].UploadID | 
					
						
							|  |  |  | 		uploadIndex++ | 
					
						
							|  |  |  | 		if len(result.Uploads) == maxUploads { | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	result.IsTruncated = uploadIndex < len(uploads) | 
					
						
							| 
									
										
										
										
											2017-10-06 07:08:25 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if !result.IsTruncated { | 
					
						
							|  |  |  | 		result.NextKeyMarker = "" | 
					
						
							|  |  |  | 		result.NextUploadIDMarker = "" | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return result, nil | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
										 |  |  | // NewMultipartUpload - initialize a new multipart upload, returns a
 | 
					
						
							|  |  |  | // unique id. The unique id returned here is of UUID form, for each
 | 
					
						
							|  |  |  | // subsequent request each UUID is unique.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // Implements S3 compatible initiate multipart API.
 | 
					
						
							| 
									
										
										
										
											2019-02-09 13:31:06 +08:00
										 |  |  | func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil { | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 		return "", toObjectErr(err, bucket) | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if _, err := fs.statBucketDir(ctx, bucket); err != nil { | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 		return "", toObjectErr(err, bucket) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	uploadID := mustGetUUID() | 
					
						
							|  |  |  | 	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-01-03 01:15:06 +08:00
										 |  |  | 	err := mkdirAll(uploadIDDir, 0o755) | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return "", err | 
					
						
							| 
									
										
										
										
											2017-09-01 02:29:22 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	// Initialize fs.json values.
 | 
					
						
							|  |  |  | 	fsMeta := newFSMetaV1() | 
					
						
							| 
									
										
										
										
											2019-02-09 13:31:06 +08:00
										 |  |  | 	fsMeta.Meta = opts.UserDefined | 
					
						
							| 
									
										
										
										
											2016-05-21 11:48:47 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	fsMetaBytes, err := json.Marshal(fsMeta) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return "", err | 
					
						
							| 
									
										
										
										
											2016-09-01 04:42:57 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-01-03 01:15:06 +08:00
										 |  |  | 	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return "", err | 
					
						
							| 
									
										
										
										
											2016-09-01 04:42:57 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-03-02 03:37:57 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	return uploadID, nil | 
					
						
							| 
									
										
										
										
											2016-09-01 04:42:57 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-02-01 01:38:34 +08:00
										 |  |  | // CopyObjectPart - similar to PutObjectPart but reads data from an existing
 | 
					
						
							|  |  |  | // object. Internally incoming data is written to '.minio.sys/tmp' location
 | 
					
						
							|  |  |  | // and safely renamed to '.minio.sys/multipart' for reach parts.
 | 
					
						
							| 
									
										
										
										
											2018-03-15 03:01:47 +08:00
										 |  |  | func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, | 
					
						
							| 
									
										
										
										
											2022-01-03 01:15:06 +08:00
										 |  |  | 	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error, | 
					
						
							|  |  |  | ) { | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 	if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { | 
					
						
							|  |  |  | 		return pi, VersionNotFound{ | 
					
						
							|  |  |  | 			Bucket:    srcBucket, | 
					
						
							|  |  |  | 			Object:    srcObject, | 
					
						
							|  |  |  | 			VersionID: srcOpts.VersionID, | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil { | 
					
						
							|  |  |  | 		return pi, toObjectErr(err) | 
					
						
							| 
									
										
										
										
											2017-02-01 01:38:34 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-11-15 09:36:41 +08:00
										 |  |  | 	partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts) | 
					
						
							| 
									
										
										
										
											2017-02-01 01:38:34 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2017-06-22 10:53:09 +08:00
										 |  |  | 		return pi, toObjectErr(err, dstBucket, dstObject) | 
					
						
							| 
									
										
										
										
											2017-02-01 01:38:34 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return partInfo, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-03 06:19:13 +08:00
// PutObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
	// FS backend is unversioned: only the "null" version (or none) is valid.
	if opts.VersionID != "" && opts.VersionID != nullVersionID {
		return pi, VersionNotFound{
			Bucket:    bucket,
			Object:    object,
			VersionID: opts.VersionID,
		}
	}

	data := r.Reader
	if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	// Validate input data size and it can never be less than -1
	// (-1 denotes an unknown/streaming size).
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return pi, toObjectErr(errInvalidArgument)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	// Just check if the uploadID exists to avoid copy if it doesn't.
	// The per-upload fs.json acts as the existence marker.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return pi, toObjectErr(err, bucket, object)
	}

	// Unique temp name (uploadID + fresh UUID + partID) so concurrent
	// uploads of the same part never collide in the tmp bucket.
	tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID))
	bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, data.Size())

	// Delete temporary part in case of failure. If
	// PutObjectPart succeeds then there would be nothing to
	// delete in which case we just ignore the error.
	defer fsRemoveFile(ctx, tmpPartPath)

	if err != nil {
		return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
	}

	// Should return IncompleteBody{} error when reader has fewer
	// bytes than specified in request header.
	if bytesWritten < data.Size() {
		return pi, IncompleteBody{Bucket: bucket, Object: object}
	}

	etag := r.MD5CurrentHexString()

	// No client-supplied MD5 — synthesize an ETag so the part name is valid.
	if etag == "" {
		etag = GenETag()
	}

	// Part file name encodes partID, etag and the pre-compression size.
	partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag, data.ActualSize()))

	// Make sure not to create parent directories if they don't exist - the upload might have been aborted.
	if err = Rename(tmpPartPath, partPath); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	// Opportunistically append completed parts in the background to
	// speed up the eventual CompleteMultipartUpload.
	go fs.backgroundAppend(ctx, bucket, object, uploadID)

	// Stat the renamed part to report its on-disk size and mod-time.
	fi, err := fsStatFile(ctx, partPath)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}
	return PartInfo{
		PartNumber:   partID,
		LastModified: fi.ModTime(),
		ETag:         etag,
		Size:         fi.Size(),
		ActualSize:   data.ActualSize(),
	}, nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-29 03:36:20 +08:00
										 |  |  | // GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
 | 
					
						
							|  |  |  | // by callers to verify object states
 | 
					
						
							|  |  |  | // - encrypted
 | 
					
						
							|  |  |  | // - compressed
 | 
					
						
							|  |  |  | func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { | 
					
						
							|  |  |  | 	minfo := MultipartInfo{ | 
					
						
							|  |  |  | 		Bucket:   bucket, | 
					
						
							|  |  |  | 		Object:   object, | 
					
						
							|  |  |  | 		UploadID: uploadID, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil { | 
					
						
							|  |  |  | 		return minfo, toObjectErr(err) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Check if bucket exists
 | 
					
						
							|  |  |  | 	if _, err := fs.statBucketDir(ctx, bucket); err != nil { | 
					
						
							|  |  |  | 		return minfo, toObjectErr(err, bucket) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) | 
					
						
							|  |  |  | 	if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil { | 
					
						
							|  |  |  | 		if err == errFileNotFound || err == errFileAccessDenied { | 
					
						
							| 
									
										
										
										
											2020-09-09 05:22:04 +08:00
										 |  |  | 			return minfo, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} | 
					
						
							| 
									
										
										
										
											2020-05-29 03:36:20 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		return minfo, toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-24 16:14:16 +08:00
										 |  |  | 	fsMetaBytes, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile)) | 
					
						
							| 
									
										
										
										
											2020-05-29 03:36:20 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return minfo, toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	var fsMeta fsMetaV1 | 
					
						
							| 
									
										
										
										
											2022-01-03 01:15:06 +08:00
										 |  |  | 	json := jsoniter.ConfigCompatibleWithStandardLibrary | 
					
						
							| 
									
										
										
										
											2020-05-29 03:36:20 +08:00
										 |  |  | 	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil { | 
					
						
							|  |  |  | 		return minfo, toObjectErr(err, bucket, object) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	minfo.UserDefined = fsMeta.Meta | 
					
						
							|  |  |  | 	return minfo, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-10-06 07:08:25 +08:00
// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID.  Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
		return result, toObjectErr(err)
	}
	// Echo the request parameters back in the response as S3 requires.
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker

	// Check if bucket exists
	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	// The per-upload fs.json is the existence marker for a live upload.
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.LogIf(ctx, err)
		return result, toObjectErr(err, bucket)
	}

	// A part may have been uploaded more than once (retries); collect one
	// PartInfo per part number, keeping the most recent by mod-time below.
	partsMap := make(map[int]PartInfo)
	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}

		partNumber, currentEtag, actualSize, derr := fs.decodePartFile(entry)
		if derr != nil {
			// Skip part files whose name don't match expected format. These could be backend filesystem specific files.
			continue
		}

		// Part file may have been concurrently removed (e.g. abort); skip it.
		entryStat, err := fsStatFile(ctx, pathJoin(uploadIDDir, entry))
		if err != nil {
			continue
		}

		currentMeta := PartInfo{
			PartNumber:   partNumber,
			ETag:         currentEtag,
			ActualSize:   actualSize,
			Size:         entryStat.Size(),
			LastModified: entryStat.ModTime(),
		}

		cachedMeta, ok := partsMap[partNumber]
		if !ok {
			partsMap[partNumber] = currentMeta
			continue
		}

		// Newer upload of the same part number wins.
		if currentMeta.LastModified.After(cachedMeta.LastModified) {
			partsMap[partNumber] = currentMeta
		}
	}

	// Flatten and sort by part number for deterministic, S3-ordered output.
	var parts []PartInfo
	for _, partInfo := range partsMap {
		parts = append(parts, partInfo)
	}

	sort.Slice(parts, func(i int, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})

	i := 0
	if partNumberMarker != 0 {
		// If the marker was set, skip the entries till the marker.
		for _, part := range parts {
			i++
			if part.PartNumber == partNumberMarker {
				break
			}
		}
	}

	// Return up to maxParts entries after the marker; flag truncation and
	// the next marker when more parts remain.
	partsCount := 0
	for partsCount < maxParts && i < len(parts) {
		result.Parts = append(result.Parts, parts[i])
		i++
		partsCount++
	}
	if i < len(parts) {
		result.IsTruncated = true
		if partsCount != 0 {
			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
		}
	}

	// Read fs.json again to include user-defined metadata in the listing;
	// its disappearance here means the upload was aborted meanwhile.
	rc, _, err := fsOpenFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile), 0)
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}
	defer rc.Close()

	fsMetaBytes, err := ioutil.ReadAll(rc)
	if err != nil {
		return result, toObjectErr(err, bucket, object)
	}

	var fsMeta fsMetaV1
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
		return result, err
	}

	result.UserDefined = fsMeta.Meta
	return result, nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
//
// The named return `e` is read by a deferred closure below to decide
// whether a freshly-created fs.json must be removed on failure.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {
	var actualSize int64

	if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
		return oi, toObjectErr(err)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return oi, toObjectErr(err, bucket)
	}
	// Invalidate any cached namespace state for this object on exit,
	// regardless of success or failure.
	defer NSUpdated(bucket, object)

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		// errFileAccessDenied is treated the same as not-found here;
		// both indicate the uploadID directory is unusable.
		if err == errFileNotFound || err == errFileAccessDenied {
			return oi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return oi, toObjectErr(err, bucket, object)
	}

	// ensure that part ETag is canonicalized to strip off extraneous quotes
	for i := range parts {
		parts[i].ETag = canonicalizeETag(parts[i].ETag)
	}

	fsMeta := fsMetaV1{}

	// Allocate parts similar to incoming slice.
	fsMeta.Parts = make([]ObjectPartInfo, len(parts))

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return oi, err
	}

	// Create entries trie structure for prefix match
	entriesTrie := trie.NewTrie()
	for _, entry := range entries {
		entriesTrie.Insert(entry)
	}

	// Save consolidated actual size.
	var objectActualSize int64
	// Validate all parts and then commit to disk.
	for i, part := range parts {
		// Resolve the on-disk file name for this (partNumber, etag) pair
		// via the trie; empty means the client referenced a part we
		// never stored.
		partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag)
		if partFile == "" {
			return oi, InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
		}

		// Read the actualSize from the pathFileName.
		// The part file name encodes the actual size as its final
		// dot-separated token; a non-numeric suffix is treated as an
		// invalid part.
		subParts := strings.Split(partFile, ".")
		actualSize, err = strconv.ParseInt(subParts[len(subParts)-1], 10, 64)
		if err != nil {
			return oi, InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
		}

		partPath := pathJoin(uploadIDDir, partFile)

		var fi os.FileInfo
		fi, err = fsStatFile(ctx, partPath)
		if err != nil {
			if err == errFileNotFound || err == errFileAccessDenied {
				return oi, InvalidPart{}
			}
			return oi, err
		}

		// Size is the on-disk size; ActualSize is the size parsed from
		// the file-name suffix above (they can differ, e.g. for
		// transformed data — not verifiable from this file alone).
		fsMeta.Parts[i] = ObjectPartInfo{
			Number:     part.PartNumber,
			Size:       fi.Size(),
			ActualSize: actualSize,
		}

		// Consolidate the actual size.
		objectActualSize += actualSize

		// The last part is exempt from the minimum-size check below.
		if i == len(parts)-1 {
			break
		}

		// All parts except the last part has to be atleast 5MB.
		if !isMinAllowedPartSize(actualSize) {
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   actualSize,
				PartETag:   part.ETag,
			}
		}
	}

	appendFallback := true // In case background-append did not append the required parts.
	appendFilePath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, fmt.Sprintf("%s.%s", uploadID, mustGetUUID()))

	// Most of the times appendFile would already be fully appended by now. We call fs.backgroundAppend()
	// to take care of the following corner case:
	// 1. The last PutObjectPart triggers go-routine fs.backgroundAppend, this go-routine has not started yet.
	// 2. Now CompleteMultipartUpload gets called which sees that lastPart is not appended and starts appending
	//    from the beginning
	fs.backgroundAppend(ctx, bucket, object, uploadID)

	// Claim (and detach) the background-append state for this uploadID
	// under the map mutex so no other completer can reuse it.
	fs.appendFileMapMu.Lock()
	file := fs.appendFileMap[uploadID]
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	if file != nil {
		file.Lock()
		defer file.Unlock()
		// Verify that appendFile has all the parts.
		// Only if every (ETag, PartNumber) pair matches in order do we
		// reuse the background-appended file; any mismatch leaves
		// appendFallback true and we rebuild from the part files.
		if len(file.parts) == len(parts) {
			for i := range parts {
				if parts[i].ETag != file.parts[i].ETag {
					break
				}
				if parts[i].PartNumber != file.parts[i].PartNumber {
					break
				}
				if i == len(parts)-1 {
					appendFilePath = file.filePath
					appendFallback = false
				}
			}
		}
	}

	if appendFallback {
		// The background-appended file (if any) is unusable; discard it
		// and concatenate the parts ourselves into appendFilePath.
		if file != nil {
			fsRemoveFile(ctx, file.filePath)
		}
		for _, part := range parts {
			partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag)
			if partFile == "" {
				logger.LogIf(ctx, fmt.Errorf("%.5d.%s missing will not proceed",
					part.PartNumber, part.ETag))
				return oi, InvalidPart{
					PartNumber: part.PartNumber,
					GotETag:    part.ETag,
				}
			}
			if err = xioutil.AppendFile(appendFilePath, pathJoin(uploadIDDir, partFile), globalFSOSync); err != nil {
				logger.LogIf(ctx, err)
				return oi, toObjectErr(err)
			}
		}
	}

	// Hold write lock on the object.
	destLock := fs.NewNSLock(bucket, object)
	lkctx, err := destLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	// Continue under the lock's context; unlock when we return.
	ctx = lkctx.Context()
	defer destLock.Unlock(lkctx.Cancel)

	bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
	fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
	// Open the object's fs.json for writing; create it if it does not
	// exist yet and remember that it is fresh (see deferred cleanup).
	metaFile, err := fs.rwPool.Write(fsMetaPath)
	var freshFile bool
	if err != nil {
		if !errors.Is(err, errFileNotFound) {
			logger.LogIf(ctx, err)
			return oi, toObjectErr(err, bucket, object)
		}
		metaFile, err = fs.rwPool.Create(fsMetaPath)
		if err != nil {
			logger.LogIf(ctx, err)
			return oi, toObjectErr(err, bucket, object)
		}
		freshFile = true
	}
	defer metaFile.Close()
	defer func() {
		// Remove meta file when CompleteMultipart encounters
		// any error and it is a fresh file.
		//
		// We should preserve the `fs.json` of any
		// existing object
		if e != nil && freshFile {
			tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
			fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
		}
	}()

	// Read saved fs metadata for ongoing multipart.
	fsMetaBuf, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	err = json.Unmarshal(fsMetaBuf, &fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	// Save additional metadata.
	if fsMeta.Meta == nil {
		fsMeta.Meta = make(map[string]string)
	}

	// Prefer a caller-supplied etag (opts.UserDefined); otherwise derive
	// the S3 multipart etag from the part etags.
	fsMeta.Meta["etag"] = opts.UserDefined["etag"]
	if fsMeta.Meta["etag"] == "" {
		fsMeta.Meta["etag"] = getCompleteMultipartMD5(parts)
	}

	// Save consolidated actual size.
	fsMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
	if _, err = fsMeta.WriteTo(metaFile); err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	// Atomically move the fully-appended data file into its final
	// object location.
	err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	// Purge multipart folders
	{
		// Rename the uploadID directory out of the way first, then let a
		// deferred fsRemoveAll reclaim it after we return.
		fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID())
		defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background.

		Rename(uploadIDDir, fsTmpObjPath)

		// It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object)
		fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object))
	}

	// Stat the final object to populate the returned ObjectInfo.
	fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
										 |  |  | // AbortMultipartUpload - aborts an ongoing multipart operation
 | 
					
						
							|  |  |  | // signified by the input uploadID. This is an atomic operation
 | 
					
						
							|  |  |  | // doesn't require clients to initiate multiple such requests.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // All parts are purged from all disks and reference to the uploadID
 | 
					
						
							|  |  |  | // would be removed from the system, rollback is not possible on this
 | 
					
						
							|  |  |  | // operation.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // Implements S3 compatible Abort multipart API, slight difference is
 | 
					
						
							|  |  |  | // that this is an atomic idempotent operation. Subsequent calls have
 | 
					
						
							|  |  |  | // no affect and further requests to the same uploadID would not be
 | 
					
						
							|  |  |  | // honored.
 | 
					
						
							| 
									
										
										
										
											2020-09-15 06:57:13 +08:00
										 |  |  | func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil { | 
					
						
							| 
									
										
										
										
											2016-12-02 15:15:17 +08:00
										 |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	if _, err := fs.statBucketDir(ctx, bucket); err != nil { | 
					
						
							|  |  |  | 		return toObjectErr(err, bucket) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	fs.appendFileMapMu.Lock() | 
					
						
							| 
									
										
										
										
											2018-10-22 22:36:30 +08:00
										 |  |  | 	// Remove file in tmp folder
 | 
					
						
							|  |  |  | 	file := fs.appendFileMap[uploadID] | 
					
						
							|  |  |  | 	if file != nil { | 
					
						
							|  |  |  | 		fsRemoveFile(ctx, file.filePath) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	delete(fs.appendFileMap, uploadID) | 
					
						
							|  |  |  | 	fs.appendFileMapMu.Unlock() | 
					
						
							| 
									
										
										
										
											2017-09-28 23:09:28 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) | 
					
						
							|  |  |  | 	// Just check if the uploadID exists to avoid copy if it doesn't.
 | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2018-04-11 00:36:37 +08:00
										 |  |  | 		if err == errFileNotFound || err == errFileAccessDenied { | 
					
						
							| 
									
										
										
										
											2020-09-09 05:22:04 +08:00
										 |  |  | 			return InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 		return toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-10-22 22:36:30 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-31 04:56:31 +08:00
										 |  |  | 	// Purge multipart folders
 | 
					
						
							|  |  |  | 	{ | 
					
						
							|  |  |  | 		fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID()) | 
					
						
							|  |  |  | 		defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background.
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-11-19 08:09:12 +08:00
										 |  |  | 		Rename(uploadIDDir, fsTmpObjPath) | 
					
						
							| 
									
										
										
										
											2017-01-17 09:05:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-31 04:56:31 +08:00
										 |  |  | 		// It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object)
 | 
					
						
							|  |  |  | 		fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object)) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2018-10-22 22:36:30 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | // Removes multipart uploads if any older than `expiry` duration
 | 
					
						
							|  |  |  | // on all buckets for every `cleanupInterval`, this function is
 | 
					
						
							|  |  |  | // blocking and should be run in a go-routine.
 | 
					
						
							| 
									
										
										
										
											2021-10-05 01:52:28 +08:00
										 |  |  | func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) { | 
					
						
							|  |  |  | 	timer := time.NewTimer(globalAPIConfig.getStaleUploadsCleanupInterval()) | 
					
						
							| 
									
										
										
										
											2021-02-06 11:23:48 +08:00
										 |  |  | 	defer timer.Stop() | 
					
						
							| 
									
										
										
										
											2018-05-05 01:43:20 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 	for { | 
					
						
							|  |  |  | 		select { | 
					
						
							| 
									
										
										
										
											2020-04-17 01:56:18 +08:00
										 |  |  | 		case <-ctx.Done(): | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 			return | 
					
						
							| 
									
										
										
										
											2021-02-06 11:23:48 +08:00
										 |  |  | 		case <-timer.C: | 
					
						
							|  |  |  | 			// Reset for the next interval
 | 
					
						
							| 
									
										
										
										
											2021-10-05 01:52:28 +08:00
										 |  |  | 			timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval()) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			expiry := globalAPIConfig.getStaleUploadsExpiry() | 
					
						
							| 
									
										
										
										
											2021-02-06 11:23:48 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 			now := time.Now() | 
					
						
							|  |  |  | 			entries, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket)) | 
					
						
							|  |  |  | 			if err != nil { | 
					
						
							|  |  |  | 				continue | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			for _, entry := range entries { | 
					
						
							|  |  |  | 				uploadIDs, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry)) | 
					
						
							|  |  |  | 				if err != nil { | 
					
						
							|  |  |  | 					continue | 
					
						
							|  |  |  | 				} | 
					
						
							| 
									
										
										
										
											2019-10-17 02:57:52 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 				// Remove the trailing slash separator
 | 
					
						
							|  |  |  | 				for i := range uploadIDs { | 
					
						
							|  |  |  | 					uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 				for _, uploadID := range uploadIDs { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 					fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID)) | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 					if err != nil { | 
					
						
							|  |  |  | 						continue | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					if now.Sub(fi.ModTime()) > expiry { | 
					
						
							| 
									
										
										
										
											2018-04-06 06:04:40 +08:00
										 |  |  | 						fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID)) | 
					
						
							| 
									
										
										
										
											2018-12-20 03:27:10 +08:00
										 |  |  | 						// It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object)
 | 
					
						
							|  |  |  | 						fsRemoveDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry)) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-10-17 02:57:52 +08:00
										 |  |  | 						// Remove uploadID from the append file map and its corresponding temporary file
 | 
					
						
							|  |  |  | 						fs.appendFileMapMu.Lock() | 
					
						
							|  |  |  | 						bgAppend, ok := fs.appendFileMap[uploadID] | 
					
						
							|  |  |  | 						if ok { | 
					
						
							| 
									
										
										
										
											2020-09-01 03:35:40 +08:00
										 |  |  | 							_ = fsRemoveFile(ctx, bgAppend.filePath) | 
					
						
							| 
									
										
										
										
											2019-10-17 02:57:52 +08:00
										 |  |  | 							delete(fs.appendFileMap, uploadID) | 
					
						
							|  |  |  | 						} | 
					
						
							|  |  |  | 						fs.appendFileMapMu.Unlock() | 
					
						
							| 
									
										
										
										
											2018-02-01 05:17:24 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2016-06-03 03:18:56 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2016-05-03 07:57:31 +08:00
										 |  |  | } |