/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/djherbis/atime"

	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/wildcard"
)

const (
	// disk cache needs to have object size space free for a cache entry to be created.
	cacheTrashDir        = "trash"
	cacheCleanupInterval = 10 // in minutes
)

// abstract slice of cache drives backed by FS.
type diskCache struct {
	cfs []*cacheFSObjects
}

// Abstracts disk caching - used by the S3 layer
type cacheObjects struct {
	// pointer to disk cache
	cache *diskCache
	// ListObjects pool management.
	listPool *treeWalkPool
	// file path patterns to exclude from cache
	exclude []string
	// Object functions pointing to the corresponding functions of backend implementation.
	GetObjectNInfoFn          func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectFn               func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
	GetObjectInfoFn           func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	PutObjectFn               func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObjectFn            func(ctx context.Context, bucket, object string) error
	ListObjectsFn             func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
	ListObjectsV2Fn           func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
	ListBucketsFn             func(ctx context.Context) (buckets []BucketInfo, err error)
	GetBucketInfoFn           func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
	NewMultipartUploadFn      func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
	PutObjectPartFn           func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
	AbortMultipartUploadFn    func(ctx context.Context, bucket, object, uploadID string) error
	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteBucketFn            func(ctx context.Context, bucket string) error
}
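
// The *Fn fields above give cacheObjects a simple interception pattern: it
// wraps a backend ObjectLayer and forwards each call to these function
// pointers, layering cache lookups and cache fills around them. A minimal
// wiring sketch (illustrative only - the construction site is not shown in
// this file excerpt):
//
//	c := &cacheObjects{
//		cache:          dcache,
//		exclude:        cacheConfig.Exclude,
//		GetObjectFn:    backend.GetObject,
//		PutObjectFn:    backend.PutObject,
//		DeleteObjectFn: backend.DeleteObject,
//	}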
					
						
// CacheStorageInfo - represents total, free capacity of
// underlying cache storage.
type CacheStorageInfo struct {
	Total uint64 // Total cache disk space.
	Free  uint64 // Free cache available space.
}
					
						
// CacheObjectLayer implements primitives for cache object API layer.
type CacheObjectLayer interface {
	// Bucket operations.
	ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
	ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
	GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
	ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
	DeleteBucket(ctx context.Context, bucket string) error
	// Object operations.
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObject(ctx context.Context, bucket, object string) error

	// Multipart operations.
	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

	// Storage operations.
	StorageInfo(ctx context.Context) CacheStorageInfo
}
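
// A compile-time interface check along these lines would catch drift between
// cacheObjects and CacheObjectLayer (an illustrative sketch, not code from
// this file):
//
//	var _ CacheObjectLayer = cacheObjects{}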
					
						
// IsCacheable returns true if the object can be saved in the cache.
func (o ObjectInfo) IsCacheable() bool {
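	// crypto.IsEncrypted reports whether the user-defined metadata carries
	// server-side-encryption keys; encrypted objects are never admitted to
	// the disk cache, presumably so decrypted content is never persisted
	// onto cache drives.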
					
						
	return !crypto.IsEncrypted(o.UserDefined)
}
					
						
// backendDownError returns true if err is due to a backend failure or a faulty disk if in server mode
func backendDownError(err error) bool {
	_, backendDown := err.(BackendDown)
	return backendDown || IsErr(err, baseErrs...)
}
					
						
// get the cache disk where the object is currently cached for a GET operation. If the object does not exist at that
// location, treat the list of cache drives as a circular buffer and walk through them starting at the hash index
// until an online drive is found. If the object is not found, fall back to the first online cache drive
// closest to the hash index, so that the object can be recached.
func (c diskCache) getCachedFSLoc(ctx context.Context, bucket, object string) (*cacheFSObjects, error) {
	index := c.hashIndex(bucket, object)
	numDisks := len(c.cfs)
	// save the first online cache disk closest to the hint index
	var firstOnlineDisk *cacheFSObjects
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cfs[i] == nil {
			continue
		}
		if c.cfs[i].IsOnline() {
			if firstOnlineDisk == nil {
				firstOnlineDisk = c.cfs[i]
			}
			if c.cfs[i].Exists(ctx, bucket, object) {
				return c.cfs[i], nil
			}
		}
	}

	if firstOnlineDisk != nil {
		return firstOnlineDisk, nil
	}
	return nil, errDiskNotFound
}
					
						
// choose a cache drive deterministically based on the hash of bucket/object. The hash index is treated as
// a hint. In the event that the cache drive at the hash index is offline, treat the list of cache drives
// as a circular buffer and walk through them starting at the hash index until an online drive is found.
func (c diskCache) getCacheFS(ctx context.Context, bucket, object string) (*cacheFSObjects, error) {
	index := c.hashIndex(bucket, object)
	numDisks := len(c.cfs)
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cfs[i] == nil {
			continue
		}
		if c.cfs[i].IsOnline() {
			return c.cfs[i], nil
		}
	}
	return nil, errDiskNotFound
}
					
						
// Compute a deterministic hash index for the given bucket and object
func (c diskCache) hashIndex(bucket, object string) int {
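	// crcHashMod maps the joined path to an index in [0, len(c.cfs)), so a
	// given bucket/object pair always prefers the same cache drive; e.g.
	// with four cache drives, "photos/2018/a.jpg" (an illustrative key)
	// always hashes to the same index, and the circular walks in the
	// callers above only move past it when drives are offline.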
					
						
	return crcHashMod(pathJoin(bucket, object), len(c.cfs))
}
					
						
// construct a metadata k-v map
func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {
	metadata := make(map[string]string)
	metadata["etag"] = objInfo.ETag
	metadata["content-type"] = objInfo.ContentType
	metadata["content-encoding"] = objInfo.ContentEncoding

	for key, val := range objInfo.UserDefined {
		metadata[key] = val
	}
	return metadata
}
					
						
func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if c.isCacheExclude(bucket, object) {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	// fetch cacheFSObjects if the object is currently cached or the nearest available cache drive
	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
	if err != nil {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	cacheReader, cacheErr := dcache.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)

	objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
	if backendDownError(err) && cacheErr == nil {
		return cacheReader, nil
	} else if err != nil {
		if cacheErr == nil {
			// close the open cache reader so it is not leaked on the error path
			cacheReader.Close()
		}
		if _, ok := err.(ObjectNotFound); ok {
			// Delete cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
		}
		return nil, err
	}

	if !objInfo.IsCacheable() || filterFromCache(objInfo.UserDefined) {
		if cacheErr == nil {
			// not serving from cache - release the open cache reader
			cacheReader.Close()
		}
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}
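
	// Validate the cached copy against the backend: serve the cache hit only
	// when the ETags match and the cached entry is not stale; otherwise evict
	// it so the object is refilled from the backend below.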
					
						
	if cacheErr == nil {
		if cacheReader.ObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
			// Object is not stale, so serve from cache
			return cacheReader, nil
		}
		cacheReader.Close()
		// Object is stale, so delete from cache
		dcache.Delete(ctx, bucket, object)
	}

	// Since we got here, we are serving the request from the backend,
	// and also adding the object to the cache.

	if rs != nil {
		// We don't cache partial objects.
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}
	if !dcache.diskAvailable(objInfo.Size) {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	}

	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, writeLock, opts)
	if bkErr != nil {
		return nil, bkErr
	}
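
	// Write-through cache fill: tee the backend stream into a pipe, feed the
	// pipe to dcache.Put in a goroutine, and hand the tee reader back to the
	// caller, so the object is cached as a side effect of streaming it.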
					
						
	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(bkReader, pipeWriter)
	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
	if herr != nil {
		bkReader.Close()
		return nil, herr
	}

	go func() {
		opts := ObjectOptions{}
		putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(bkReader.ObjInfo), opts)
		// close the write end of the pipe, so the error gets
		// propagated to getObjReader
		pipeWriter.CloseWithError(putErr)
	}()

	cleanupBackend := func() { bkReader.Close() }
	cleanupPipe := func() { pipeReader.Close() }
	gr = NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, cleanupBackend, cleanupPipe)
	return gr, nil
}
					
						
// Uses the cached object to serve the request. If the object is not cached it serves the request from the backend and also
// stores it in the cache for serving subsequent requests.
func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
	GetObjectFn := c.GetObjectFn
	GetObjectInfoFn := c.GetObjectInfoFn

	if c.isCacheExclude(bucket, object) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	// fetch cacheFSObjects if the object is currently cached or the nearest available cache drive
	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
	if err != nil {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	// stat the object on the backend
	objInfo, err := GetObjectInfoFn(ctx, bucket, object, opts)
	backendDown := backendDownError(err)
	if err != nil && !backendDown {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
		}
		return err
	}

	if !backendDown && !objInfo.IsCacheable() {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}

	if !backendDown && filterFromCache(objInfo.UserDefined) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}

	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
	if err == nil {
		if backendDown {
			// If the backend is down, serve the request from cache.
			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
		}
		if cachedObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
			return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
		}
		dcache.Delete(ctx, bucket, object)
	}
	if startOffset != 0 || (length > 0 && length != objInfo.Size) {
		// We don't cache partial objects.
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
	if !dcache.diskAvailable(objInfo.Size) {
		return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
	}
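	// Cache fill for the full-object path: the backend read fans out through
	// io.MultiWriter to both the client writer and a pipe, and the pipe feeds
	// dcache.Put, so one backend read serves the response and the cache copy.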
					
						
	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
	if err != nil {
		return err
	}
	go func() {
		gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
		pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
	}()
	err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(objInfo), opts)
	// close the read end of the pipe on both paths so it is not leaked
	pipeReader.Close()
	if err != nil {
		return err
	}
	return
}
					
						
// Returns ObjectInfo from cache if available.
func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	getObjectInfoFn := c.GetObjectInfoFn
	if c.isCacheExclude(bucket, object) {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	// fetch cacheFSObjects if the object is currently cached or the nearest available cache drive
	dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
	if err != nil {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
			return ObjectInfo{}, err
		}
		if !backendDownError(err) {
			return ObjectInfo{}, err
		}
		// when backend is down, serve from cache.
		cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object, opts)
		if cerr == nil {
			return cachedObjInfo, nil
		}
		return ObjectInfo{}, BackendDown{}
	}
	// when backend is up, do a sanity check on the cached object
	cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
	if err != nil {
		return objInfo, nil
	}
	if cachedObjInfo.ETag != objInfo.ETag {
		// Delete the cached entry if the backend object was replaced.
		dcache.Delete(ctx, bucket, object)
	}
	return objInfo, nil
}
					
						
// Returns function "listDir" of the type listDirFunc.
// isLeaf - is used by the listDir function to check if an entry is a leaf or non-leaf entry.
// disks - list of cacheFSObjects
func listDirCacheFactory(isLeaf isLeafFunc, disks []*cacheFSObjects) listDirFunc {
	listCacheDirs := func(bucket, prefixDir, prefixEntry string) (dirs []string) {
		var entries []string
		for _, disk := range disks {
			// ignore disk-caches that might be missing/offline
			if disk == nil {
				continue
			}

			fs := disk.FSObjects
			var err error
			entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))
			if err != nil {
				continue
			}

			// Filter entries that have the prefix prefixEntry.
			entries = filterMatchingPrefix(entries, prefixEntry)
			dirs = append(dirs, entries...)
		}
		return dirs
	}

	// listDir - lists all the entries at a given prefix and given entry in the prefix.
	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool) {
		cacheEntries := listCacheDirs(bucket, prefixDir, prefixEntry)
		for _, entry := range cacheEntries {
			// Find elements in entries which are not in mergedEntries
			idx := sort.SearchStrings(mergedEntries, entry)
			// if the entry is already present in mergedEntries, don't add it.
			if idx < len(mergedEntries) && mergedEntries[idx] == entry {
				continue
			}
			mergedEntries = append(mergedEntries, entry)
			sort.Strings(mergedEntries)
		}
		return mergedEntries, false
	}
	return listDir
}
					
						
// List all objects at prefix up to maxKeys, optionally delimited by '/', from the cache. Maintains the list pool
// state for future re-entrant list requests.
func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	var objInfos []ObjectInfo
	var eof bool
	var nextMarker string

	recursive := true
	if delimiter == slashSeparator {
		recursive = false
	}
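	// Re-entrant listing: resume a previous tree walk for these listParams
	// from the pool when one exists; otherwise start a fresh walk below.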
					
						
	walkResultCh, endWalkCh := c.listPool.Release(listParams{bucket, recursive, marker, prefix, false})
	if walkResultCh == nil {
		endWalkCh = make(chan struct{})
		isLeaf := func(bucket, object string) bool {
			fs, err := c.cache.getCacheFS(ctx, bucket, object)
			if err != nil {
				return false
			}
			_, err = fs.getObjectInfo(ctx, bucket, object)
			return err == nil
		}

		isLeafDir := func(bucket, object string) bool {
			fs, err := c.cache.getCacheFS(ctx, bucket, object)
			if err != nil {
				return false
			}
			return fs.isObjectDir(bucket, object)
		}

		listDir := listDirCacheFactory(isLeaf, c.cache.cfs)
		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
	}

	for i := 0; i < maxKeys; {
		walkResult, ok := <-walkResultCh
		if !ok {
			// Closed channel.
			eof = true
			break
		}
		// For any walk error return right away.
		if walkResult.err != nil {
			return result, toObjectErr(walkResult.err, bucket, prefix)
		}

		entry := walkResult.entry
		var objInfo ObjectInfo
		if hasSuffix(entry, slashSeparator) {
			// Object name needs to be full path.
			objInfo.Bucket = bucket
			objInfo.Name = entry
			objInfo.IsDir = true
		} else {
			// Set the Mode to a "regular" file.
			var err error
			fs, err := c.cache.getCacheFS(ctx, bucket, entry)
			if err != nil {
				// Ignore errDiskNotFound
				if err == errDiskNotFound {
					continue
				}
				return result, toObjectErr(err, bucket, prefix)
			}
			objInfo, err = fs.getObjectInfo(ctx, bucket, entry)
			if err != nil {
				// Ignore ObjectNotFound error
				if _, ok := err.(ObjectNotFound); ok {
					continue
				}
				return result, toObjectErr(err, bucket, prefix)
			}
		}
		nextMarker = objInfo.Name
		objInfos = append(objInfos, objInfo)
		i++
		if walkResult.end {
			eof = true
			break
		}
	}

	params := listParams{bucket, recursive, nextMarker, prefix, false}
	if !eof {
		c.listPool.Set(params, walkResultCh, endWalkCh)
	}

	result = ListObjectsInfo{IsTruncated: !eof}
	for _, objInfo := range objInfos {
		result.NextMarker = objInfo.Name
		if objInfo.IsDir && delimiter == slashSeparator {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}
	return result, nil
}
					
						
// listCacheV2Objects lists all blobs in bucket filtered by prefix from the cache
func (c cacheObjects) listCacheV2Objects(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	loi, err := c.listCacheObjects(ctx, bucket, prefix, continuationToken, delimiter, maxKeys)
	if err != nil {
		return result, err
	}

	listObjectsV2Info := ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}
	return listObjectsV2Info, err
}
					
						
// List all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests. Retrieves from the cache if the backend is down.
func (c cacheObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	listObjectsFn := c.ListObjectsFn

	result, err = listObjectsFn(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		if backendDownError(err) {
			return c.listCacheObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
		}
		return
	}
	return
}
					
						
// ListObjectsV2 lists all blobs in bucket filtered by prefix
func (c cacheObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	listObjectsV2Fn := c.ListObjectsV2Fn

	result, err = listObjectsV2Fn(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
	if err != nil {
		if backendDownError(err) {
			return c.listCacheV2Objects(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
		}
		return
	}
	return
}
					
						
// Lists all the buckets in the cache
func (c cacheObjects) listBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	m := make(map[string]string)
	for _, cache := range c.cache.cfs {
		// ignore disk-caches that might be missing/offline
		if cache == nil {
			continue
		}
		entries, err := cache.ListBuckets(ctx)
		if err != nil {
			return nil, err
		}
		for _, entry := range entries {
			_, ok := m[entry.Name]
			if !ok {
				m[entry.Name] = entry.Name
				buckets = append(buckets, entry)
			}
		}
	}
	// Sort bucket infos by bucket name.
	sort.Sort(byBucketName(buckets))
	return
}
					
						
// Returns the list of buckets from the cache or the backend. If the backend is down, buckets
// available on the cache are served.
func (c cacheObjects) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	listBucketsFn := c.ListBucketsFn
	buckets, err = listBucketsFn(ctx)
	if err != nil {
		if backendDownError(err) {
			return c.listBuckets(ctx)
		}
		return []BucketInfo{}, err
	}
	return
}
					
						
							|  |  |  | 
 | 
					
						
// Returns bucket info from cache if backend is down.
func (c cacheObjects) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
	getBucketInfoFn := c.GetBucketInfoFn
	bucketInfo, err = getBucketInfoFn(ctx, bucket)
	if backendDownError(err) {
		for _, cache := range c.cache.cfs {
			// ignore disk-caches that might be missing/offline
			if cache == nil {
				continue
			}
			if bucketInfo, err = cache.GetBucketInfo(ctx, bucket); err == nil {
				return
			}
		}
	}
	return
}
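
// Note: the cache is consulted only when the backend call fails with a
// backend-down error; the first cache drive that knows the bucket wins and
// its info is returned as-is.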
					
						
// DeleteObject - deletes from cache as well if backend operation succeeds
func (c cacheObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
	if err = c.DeleteObjectFn(ctx, bucket, object); err != nil {
		return
	}
	if c.isCacheExclude(bucket, object) {
		return
	}
	dcache, cerr := c.cache.getCachedFSLoc(ctx, bucket, object)
	if cerr == nil {
		_ = dcache.DeleteObject(ctx, bucket, object)
	}
	return
}
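
// Note: the backend delete must succeed before the cached copy is removed,
// and any error from the cache delete is ignored (hence the blank identifier).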
					
						
// Returns true if object should be excluded from cache
func (c cacheObjects) isCacheExclude(bucket, object string) bool {
	for _, pattern := range c.exclude {
		matchStr := fmt.Sprintf("%s/%s", bucket, object)
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}
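
// Example (illustrative): with exclude patterns such as "mybucket/*" or
// "backup*" configured, a request for object "photos/a.jpg" in bucket
// "mybucket" is matched as "mybucket/photos/a.jpg" against each pattern
// via wildcard.MatchSimple.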
					
						
// PutObject - caches the uploaded object for single Put operations
func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	putObjectFn := c.PutObjectFn
	data := r.Reader
	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectFn(ctx, bucket, object, r, metadata, opts)
	}
	size := r.Size()

	// fetch from backend if there is no space on cache drive
	if !dcache.diskAvailable(size) {
		return putObjectFn(ctx, bucket, object, r, metadata, opts)
	}
	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, metadata, opts)
	}
	objInfo = ObjectInfo{}
	// Initialize pipe to stream data to backend
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
	if err != nil {
		return ObjectInfo{}, err
	}
	// Initialize pipe to stream data to cache
	rPipe, wPipe := io.Pipe()
	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
	if err != nil {
		return ObjectInfo{}, err
	}
	oinfoCh := make(chan ObjectInfo)
	errCh := make(chan error)
	go func() {
		oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), metadata, opts)
		if perr != nil {
			pipeWriter.CloseWithError(perr)
			wPipe.CloseWithError(perr)
			close(oinfoCh)
			errCh <- perr
			return
		}
		close(errCh)
		oinfoCh <- oinfo
	}()

	go func() {
		if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), metadata, opts); err != nil {
			wPipe.CloseWithError(err)
			return
		}
	}()

	mwriter := io.MultiWriter(pipeWriter, wPipe)
	_, err = io.Copy(mwriter, data)
	if err != nil {
		err = <-errCh
		return objInfo, err
	}
	pipeWriter.Close()
	wPipe.Close()
	objInfo = <-oinfoCh
	return objInfo, err
}
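
// The Put path above reads the client stream exactly once: io.MultiWriter
// tees it into two pipes, one hashed copy feeding the backend and the other
// feeding the cache drive, so the object lands in both places without being
// buffered in memory.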
					
						
// NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
	newMultipartUploadFn := c.NewMultipartUploadFn

	if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
		return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
	}

	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
	}

	uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata, opts)
	if err != nil {
		return
	}
	// create new multipart upload in cache with same uploadID
	dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID, opts)
	return uploadID, err
}
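
// Note: the cache reuses the backend's uploadID so that later part uploads
// can address the same multipart session on both sides; a failure to start
// the cached session is silently ignored.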
					
						
// PutObjectPart - uploads part to backend and cache simultaneously.
func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
	data := r.Reader
	putObjectPartFn := c.PutObjectPartFn
	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
	}

	if c.isCacheExclude(bucket, object) {
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
	}

	// make sure cache has at least size space available
	size := data.Size()
	if !dcache.diskAvailable(size) {
		select {
		case dcache.purgeChan <- struct{}{}:
		default:
		}
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
	}

	info = PartInfo{}
	// Initialize pipe to stream data to backend
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
	if err != nil {
		return
	}
	// Initialize pipe to stream data to cache
	rPipe, wPipe := io.Pipe()
	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
	if err != nil {
		return
	}
	pinfoCh := make(chan PartInfo)
	errorCh := make(chan error)
	go func() {
		info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
		if err != nil {
			close(pinfoCh)
			pipeWriter.CloseWithError(err)
			wPipe.CloseWithError(err)
			errorCh <- err
			return
		}
		close(errorCh)
		pinfoCh <- info
	}()
	go func() {
		if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
			wPipe.CloseWithError(perr)
			return
		}
	}()

	mwriter := io.MultiWriter(pipeWriter, wPipe)
	_, err = io.Copy(mwriter, data)
	if err != nil {
		err = <-errorCh
		return PartInfo{}, err
	}
	pipeWriter.Close()
	wPipe.Close()
	info = <-pinfoCh
	return info, err
}
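
// Note: when the cache drive is short on space, the non-blocking send on
// purgeChan merely nudges the purge goroutine to reclaim space; the part
// itself is routed straight to the backend rather than waiting.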
					
						
// AbortMultipartUpload - aborts multipart upload on backend and cache.
func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	abortMultipartUploadFn := c.AbortMultipartUploadFn

	if c.isCacheExclude(bucket, object) {
		return abortMultipartUploadFn(ctx, bucket, object, uploadID)
	}

	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return abortMultipartUploadFn(ctx, bucket, object, uploadID)
	}
	// execute backend operation
	err = abortMultipartUploadFn(ctx, bucket, object, uploadID)
	if err != nil {
		return err
	}
	// abort multipart upload on cache
	dcache.AbortMultipartUpload(ctx, bucket, object, uploadID)
	return nil
}

// CompleteMultipartUpload - completes multipart upload operation on backend and cache.
func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	completeMultipartUploadFn := c.CompleteMultipartUploadFn

	if c.isCacheExclude(bucket, object) {
		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
	}

	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
	}
	// perform backend operation
	objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
	if err != nil {
		return
	}
	// complete the multipart upload on cache with the same uploadID
	dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
	return
}

// StorageInfo - returns underlying storage statistics.
func (c cacheObjects) StorageInfo(ctx context.Context) (cInfo CacheStorageInfo) {
	var total, free uint64
	for _, cfs := range c.cache.cfs {
		if cfs == nil {
			continue
		}
		info, err := getDiskInfo(cfs.fsPath)
		logger.GetReqInfo(ctx).AppendTags("cachePath", cfs.fsPath)
		logger.LogIf(ctx, err)
		total += info.Total
		free += info.Free
	}
	return CacheStorageInfo{
		Total: total,
		Free:  free,
	}
}
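
// Note: totals are aggregated across all online cache drives; a getDiskInfo
// failure is logged and that drive simply contributes zero to the sums.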
					
						
// DeleteBucket - marks bucket to be deleted from cache if bucket is deleted from backend.
func (c cacheObjects) DeleteBucket(ctx context.Context, bucket string) (err error) {
	deleteBucketFn := c.DeleteBucketFn
	var toDel []*cacheFSObjects
	for _, cfs := range c.cache.cfs {
		// ignore disk-caches that might be missing/offline
		if cfs == nil {
			continue
		}
		if _, cerr := cfs.GetBucketInfo(ctx, bucket); cerr == nil {
			toDel = append(toDel, cfs)
		}
	}
	// perform backend operation
	err = deleteBucketFn(ctx, bucket)
	if err != nil {
		return
	}
	// move bucket metadata and content to cache's trash dir
	for _, d := range toDel {
		d.moveBucketToTrash(ctx, bucket)
	}
	return
}
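
// Note: cached copies of the bucket are moved to the trash directory only
// after the backend delete succeeds; the trash purge goroutine started in
// newCache reclaims the space asynchronously.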
					
						
// newCache initializes the cacheFSObjects for the "drives" specified in config.json
// or the global env overrides.
func newCache(config CacheConfig) (*diskCache, error) {
	var cfsObjects []*cacheFSObjects
	ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
	formats, err := loadAndValidateCacheFormat(ctx, config.Drives)
	if err != nil {
		return nil, err
	}
	for i, dir := range config.Drives {
		// skip cacheFSObjects creation for cache drives missing a format.json
		if formats[i] == nil {
			cfsObjects = append(cfsObjects, nil)
			continue
		}
		if err := checkAtimeSupport(dir); err != nil {
			return nil, errors.New("Atime support required for disk caching")
		}
		cache, err := newCacheFSObjects(dir, config.Expiry, config.MaxUse)
		if err != nil {
			return nil, err
		}
		// Start the purging go-routine for entries that have expired
		go cache.purge()

		// Start trash purge routine for deleted buckets.
		go cache.purgeTrash()

		cfsObjects = append(cfsObjects, cache)
	}
	return &diskCache{cfs: cfsObjects}, nil
}
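
// Illustrative sketch (an assumption about the config.json layout feeding
// CacheConfig; the field names mirror config.Drives/Expiry/Exclude/MaxUse):
//
//	"cache": {
//		"drives": ["/mnt/cache1", "/mnt/cache2"],
//		"expiry": 90,
//		"exclude": ["mybucket/*", "*.tmp"],
//		"maxuse": 80
//	}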
					
						
// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
	file, err := ioutil.TempFile(dir, "prefix")
	if err != nil {
		return
	}
	defer os.Remove(file.Name())
	finfo1, err := os.Stat(file.Name())
	if err != nil {
		return
	}
	// Note: a successful io.Copy reports err == nil, never io.EOF.
	if _, err = io.Copy(ioutil.Discard, file); err != nil {
		return
	}

	finfo2, err := os.Stat(file.Name())
	if err != nil {
		return
	}

	if atime.Get(finfo2).Equal(atime.Get(finfo1)) {
		return errors.New("Atime not supported")
	}
	return
}
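
// The probe above creates a temp file, reads it once, and compares the access
// times from before and after the read; on filesystems that never update
// atime (e.g. mounted with noatime), the two stats match and disk caching is
// refused.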
					
						
// Returns cacheObjects for use by Server.
func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
	// list of disk caches for cache "drives" specified in config.json or MINIO_CACHE_DRIVES env var.
	dcache, err := newCache(config)
	if err != nil {
		return nil, err
	}

	return &cacheObjects{
		cache:    dcache,
		exclude:  config.Exclude,
		listPool: newTreeWalkPool(globalLookupTimeout),
		GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
			return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
		},
		GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
		},
		GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
		},
		PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
		},
		DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
			return newObjectLayerFn().DeleteObject(ctx, bucket, object)
		},
		ListObjectsFn: func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
			return newObjectLayerFn().ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
		},
		ListObjectsV2Fn: func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
			return newObjectLayerFn().ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, fetchOwner, startAfter)
		},
		ListBucketsFn: func(ctx context.Context) (buckets []BucketInfo, err error) {
			return newObjectLayerFn().ListBuckets(ctx)
		},
		GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
			return newObjectLayerFn().GetBucketInfo(ctx, bucket)
		},
		NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
			return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
		},
		PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
			return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
		},
		AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
			return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
		},
		CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
		},
		DeleteBucketFn: func(ctx context.Context, bucket string) error {
			return newObjectLayerFn().DeleteBucket(ctx, bucket)
		},
	}, nil
}
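
// Every *Fn hook defaults to the live object layer via newObjectLayerFn;
// keeping them as swappable fields presumably allows the backend calls to be
// stubbed out (e.g. in tests) without touching the caching logic itself.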
					
						
// cacheControl holds the parsed cache-control/expires settings from object
// metadata; maxAge, sMaxAge and minFresh are durations in seconds.
type cacheControl struct {
	exclude  bool
	expiry   time.Time
	maxAge   int
	sMaxAge  int
	minFresh int
}

// cache exclude directives in cache-control header
var cacheExcludeDirectives = []string{
	"no-cache",
	"no-store",
	"must-revalidate",
}

// returns true if s is one of the cache exclude directives.
func isCacheExcludeDirective(s string) bool {
	for _, directive := range cacheExcludeDirectives {
		if s == directive {
			return true
		}
	}
	return false
}

// returns struct with cache-control settings from user metadata.
func getCacheControlOpts(m map[string]string) (c cacheControl, err error) {
	var headerVal string
	for k, v := range m {
		if k == "cache-control" {
			headerVal = v
		}
		if k == "expires" {
			if e, err := http.ParseTime(v); err == nil {
				c.expiry = e
			}
		}
	}
	if headerVal == "" {
		return
	}
	headerVal = strings.ToLower(headerVal)
	headerVal = strings.TrimSpace(headerVal)

	vals := strings.Split(headerVal, ",")
	for _, val := range vals {
		val = strings.TrimSpace(val)
		p := strings.Split(val, "=")
		if isCacheExcludeDirective(p[0]) {
			c.exclude = true
			continue
		}

		if len(p) != 2 {
			continue
		}
		if p[0] == "max-age" ||
			p[0] == "s-maxage" ||
			p[0] == "min-fresh" {
			i, err := strconv.Atoi(p[1])
			if err != nil {
				return c, err
			}
			if p[0] == "max-age" {
				c.maxAge = i
			}
			if p[0] == "s-maxage" {
				c.sMaxAge = i
			}
			if p[0] == "min-fresh" {
				c.minFresh = i
			}
		}
	}
	return c, nil
}
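
// Example (illustrative): user metadata carrying
//	"cache-control": "max-age=3600, s-maxage=300, no-store"
// parses to cacheControl{exclude: true, maxAge: 3600, sMaxAge: 300}.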
					
						
// return true if metadata has a cache-control header
// directive to exclude object from cache.
func filterFromCache(m map[string]string) bool {
	c, err := getCacheControlOpts(m)
	if err != nil {
		return false
	}
	return c.exclude
}

// returns true if cache expiry conditions met in cache-control/expiry metadata.
func isStaleCache(objInfo ObjectInfo) bool {
	c, err := getCacheControlOpts(objInfo.UserDefined)
	if err != nil {
		return false
	}
	now := time.Now()
	// stale once the cached copy's age exceeds s-maxage/max-age
	if c.sMaxAge > 0 && c.sMaxAge < int(now.Sub(objInfo.ModTime).Seconds()) {
		return true
	}
	if c.maxAge > 0 && c.maxAge < int(now.Sub(objInfo.ModTime).Seconds()) {
		return true
	}
	if !c.expiry.Equal(time.Time{}) && c.expiry.Before(time.Now()) {
		return true
	}
	if c.minFresh > 0 && c.minFresh <= int(now.Sub(objInfo.ModTime).Seconds()) {
		return true
	}
	return false
}
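
// Worked example (illustrative): with "cache-control: max-age=300" on an
// object last modified 10 minutes ago, the cached copy's age is 600s, which
// exceeds 300s, so isStaleCache returns true.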