| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | /* | 
					
						
							| 
									
										
										
										
											2020-03-13 09:57:41 +08:00
										 |  |  |  * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc. | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  |  * | 
					
						
							|  |  |  |  * Licensed under the Apache License, Version 2.0 (the "License"); | 
					
						
							|  |  |  |  * you may not use this file except in compliance with the License. | 
					
						
							|  |  |  |  * You may obtain a copy of the License at | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  *     http://www.apache.org/licenses/LICENSE-2.0
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Unless required by applicable law or agreed to in writing, software | 
					
						
							|  |  |  |  * distributed under the License is distributed on an "AS IS" BASIS, | 
					
						
							|  |  |  |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | 
					
						
							|  |  |  |  * See the License for the specific language governing permissions and | 
					
						
							|  |  |  |  * limitations under the License. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | package cmd | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							|  |  |  | 	"bytes" | 
					
						
							|  |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 	"crypto/rand" | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	"encoding/hex" | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	"fmt" | 
					
						
							|  |  |  | 	"io" | 
					
						
							|  |  |  | 	"io/ioutil" | 
					
						
							|  |  |  | 	"net/http" | 
					
						
							|  |  |  | 	"os" | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 	"strings" | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	"sync" | 
					
						
							| 
									
										
										
										
											2020-05-04 13:35:40 +08:00
										 |  |  | 	"sync/atomic" | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	"time" | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	"github.com/djherbis/atime" | 
					
						
							| 
									
										
										
										
											2020-06-30 04:25:29 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/config/cache" | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/crypto" | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	xhttp "github.com/minio/minio/cmd/http" | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/logger" | 
					
						
							|  |  |  | 	"github.com/minio/minio/pkg/disk" | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 	"github.com/minio/sio" | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
const (
	// cacheMetaJSONFile is the name of the per-object metadata file
	// ("cache.json") stored alongside cached object data.
	cacheMetaJSONFile = "cache.json"
	// cacheDataFile is the on-disk file name holding a cached object's data.
	cacheDataFile     = "part.1"
	// cacheMetaVersion is the current version of the cache.json format.
	cacheMetaVersion  = "1.0.0"
	// cacheExpiryDays is the age after which stale cache.json entries become
	// eligible for cleanup.
	cacheExpiryDays   = 90 * time.Hour * 24 // defaults to 90 days

	// SSECacheEncrypted is the metadata key indicating that the object
	// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
	SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"
)
					
						
							|  |  |  | 
 | 
					
						
// CacheChecksumInfoV1 - carries checksums of individual blocks on disk.
type CacheChecksumInfoV1 struct {
	Algorithm string `json:"algorithm"` // checksum algorithm used for the blocks.
	Blocksize int64  `json:"blocksize"` // size in bytes of each checksummed block.
}
					
						
							|  |  |  | 
 | 
					
						
// cacheMeta represents the cache metadata struct, persisted as cache.json
// for every cached object.
type cacheMeta struct {
	Version string   `json:"version"` // cacheMetaVersion at the time of writing.
	Stat    StatInfo `json:"stat"` // Stat of the current object `cache.json`.

	// checksums of blocks on disk.
	Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"`
	// Metadata map for current object.
	Meta map[string]string `json:"meta,omitempty"`
	// Ranges maps cached range to associated filename.
	Ranges map[string]string `json:"ranges,omitempty"`
	// Hits is a counter on the number of times this object has been accessed so far.
	Hits int `json:"hits,omitempty"`
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // RangeInfo has the range, file and range length information for a cached range.
 | 
					
						
							|  |  |  | type RangeInfo struct { | 
					
						
							|  |  |  | 	Range string | 
					
						
							|  |  |  | 	File  string | 
					
						
							|  |  |  | 	Size  int64 | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Empty returns true if this is an empty struct
 | 
					
						
							|  |  |  | func (r *RangeInfo) Empty() bool { | 
					
						
							|  |  |  | 	return r.Range == "" && r.File == "" && r.Size == 0 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (m *cacheMeta) ToObjectInfo(bucket, object string) (o ObjectInfo) { | 
					
						
							|  |  |  | 	if len(m.Meta) == 0 { | 
					
						
							|  |  |  | 		m.Meta = make(map[string]string) | 
					
						
							|  |  |  | 		m.Stat.ModTime = timeSentinel | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	o = ObjectInfo{ | 
					
						
							| 
									
										
										
										
											2020-01-11 12:21:13 +08:00
										 |  |  | 		Bucket:            bucket, | 
					
						
							|  |  |  | 		Name:              object, | 
					
						
							|  |  |  | 		CacheStatus:       CacheHit, | 
					
						
							|  |  |  | 		CacheLookupStatus: CacheHit, | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// We set file info only if its valid.
 | 
					
						
							|  |  |  | 	o.ModTime = m.Stat.ModTime | 
					
						
							|  |  |  | 	o.Size = m.Stat.Size | 
					
						
							|  |  |  | 	o.ETag = extractETag(m.Meta) | 
					
						
							|  |  |  | 	o.ContentType = m.Meta["content-type"] | 
					
						
							|  |  |  | 	o.ContentEncoding = m.Meta["content-encoding"] | 
					
						
							| 
									
										
										
										
											2019-10-07 13:50:24 +08:00
										 |  |  | 	if storageClass, ok := m.Meta[xhttp.AmzStorageClass]; ok { | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		o.StorageClass = storageClass | 
					
						
							|  |  |  | 	} else { | 
					
						
							|  |  |  | 		o.StorageClass = globalMinioDefaultStorageClass | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	var ( | 
					
						
							|  |  |  | 		t time.Time | 
					
						
							|  |  |  | 		e error | 
					
						
							|  |  |  | 	) | 
					
						
							|  |  |  | 	if exp, ok := m.Meta["expires"]; ok { | 
					
						
							|  |  |  | 		if t, e = time.Parse(http.TimeFormat, exp); e == nil { | 
					
						
							|  |  |  | 			o.Expires = t.UTC() | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// etag/md5Sum has already been extracted. We need to
 | 
					
						
							|  |  |  | 	// remove to avoid it from appearing as part of user-defined metadata
 | 
					
						
							|  |  |  | 	o.UserDefined = cleanMetadata(m.Meta) | 
					
						
							|  |  |  | 	return o | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// diskCache represents a single cache drive: its directory, usage
// accounting, watermarks and GC trigger machinery.
type diskCache struct {
	// is set to 0 if drive is offline
	online       uint32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
	purgeRunning int32 // set to 1 while purge() runs, to block concurrent purges.

	triggerGC     chan struct{} // buffered (cap 1) signal channel used to queue a GC run.
	dir           string         // caching directory
	stats         CacheDiskStats // disk cache stats for prometheus
	quotaPct      int            // max usage in %
	pool          sync.Pool // pool of aligned blocks reused for cache disk I/O.
	after         int // minimum accesses before an object is cached.
	lowWatermark  int // GC stops when usage drops below quotaPct*lowWatermark/100.
	highWatermark int // GC is queued when usage reaches quotaPct*highWatermark/100.
	enableRange   bool // set from config.Range — presumably enables caching of range requests; confirm with callers.
	// nsMutex namespace lock
	nsMutex *nsLockMap
	// Object functions pointing to the corresponding functions of backend implementation.
	NewNSLockFn func(ctx context.Context, cachePath string) RWLocker
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Inits the disk cache dir if it is not initialized already.
 | 
					
						
							| 
									
										
										
										
											2020-06-30 04:25:29 +08:00
										 |  |  | func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCache, error) { | 
					
						
							|  |  |  | 	quotaPct := config.MaxUse | 
					
						
							|  |  |  | 	if quotaPct == 0 { | 
					
						
							|  |  |  | 		quotaPct = config.Quota | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	if err := os.MkdirAll(dir, 0777); err != nil { | 
					
						
							| 
									
										
										
										
											2019-12-03 01:28:01 +08:00
										 |  |  | 		return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	cache := diskCache{ | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		dir:           dir, | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 		triggerGC:     make(chan struct{}, 1), | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 		stats:         CacheDiskStats{Dir: dir}, | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		quotaPct:      quotaPct, | 
					
						
							| 
									
										
										
										
											2020-06-30 04:25:29 +08:00
										 |  |  | 		after:         config.After, | 
					
						
							|  |  |  | 		lowWatermark:  config.WatermarkLow, | 
					
						
							|  |  |  | 		highWatermark: config.WatermarkHigh, | 
					
						
							|  |  |  | 		enableRange:   config.Range, | 
					
						
							| 
									
										
										
										
											2020-05-04 13:35:40 +08:00
										 |  |  | 		online:        1, | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		pool: sync.Pool{ | 
					
						
							|  |  |  | 			New: func() interface{} { | 
					
						
							| 
									
										
										
										
											2020-03-13 09:57:41 +08:00
										 |  |  | 				b := disk.AlignedBlock(int(cacheBlkSize)) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 				return &b | 
					
						
							|  |  |  | 			}, | 
					
						
							|  |  |  | 		}, | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		nsMutex: newNSLock(false), | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	go cache.purgeWait(ctx) | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 	cache.diskSpaceAvailable(0) // update if cache usage is already high.
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	cache.NewNSLockFn = func(ctx context.Context, cachePath string) RWLocker { | 
					
						
							|  |  |  | 		return cache.nsMutex.NewNSLock(ctx, nil, cachePath, "") | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return &cache, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | // diskUsageLow() returns true if disk usage falls below the low watermark w.r.t configured cache quota.
 | 
					
						
							|  |  |  | // Ex. for a 100GB disk, if quota is configured as 70%  and watermark_low = 80% and
 | 
					
						
							|  |  |  | // watermark_high = 90% then garbage collection starts when 63% of disk is used and
 | 
					
						
							|  |  |  | // stops when disk usage drops to 56%
 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | func (c *diskCache) diskUsageLow() bool { | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 	gcStopPct := c.quotaPct * c.lowWatermark / 100 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	di, err := disk.GetInfo(c.dir) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir) | 
					
						
							| 
									
										
										
										
											2020-04-10 00:30:02 +08:00
										 |  |  | 		ctx := logger.SetReqInfo(GlobalContext, reqInfo) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return false | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-09-30 13:54:02 +08:00
										 |  |  | 	usedPercent := (di.Used / di.Total) * 100 | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	low := int(usedPercent) < gcStopPct | 
					
						
							|  |  |  | 	atomic.StoreUint64(&c.stats.UsagePercent, usedPercent) | 
					
						
							|  |  |  | 	if low { | 
					
						
							|  |  |  | 		atomic.StoreInt32(&c.stats.UsageState, 0) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return low | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | // Returns if the disk usage reaches  or exceeds configured cache quota when size is added.
 | 
					
						
							|  |  |  | // If current usage without size exceeds high watermark a GC is automatically queued.
 | 
					
						
							|  |  |  | func (c *diskCache) diskSpaceAvailable(size int64) bool { | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 	gcTriggerPct := c.quotaPct * c.highWatermark / 100 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	di, err := disk.GetInfo(c.dir) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir) | 
					
						
							| 
									
										
										
										
											2020-04-10 00:30:02 +08:00
										 |  |  | 		ctx := logger.SetReqInfo(GlobalContext, reqInfo) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		return false | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 	if di.Total == 0 { | 
					
						
							|  |  |  | 		logger.Info("diskCache: Received 0 total disk size") | 
					
						
							|  |  |  | 		return false | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-09-30 13:54:02 +08:00
										 |  |  | 	usedPercent := float64(di.Used) * 100 / float64(di.Total) | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 	if usedPercent >= float64(gcTriggerPct) { | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 		atomic.StoreInt32(&c.stats.UsageState, 1) | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 		c.queueGC() | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 	atomic.StoreUint64(&c.stats.UsagePercent, uint64(usedPercent)) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Recalculate percentage with provided size added.
 | 
					
						
							| 
									
										
										
										
											2020-09-30 13:54:02 +08:00
										 |  |  | 	usedPercent = float64(di.Used+uint64(size)) * 100 / float64(di.Total) | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	return usedPercent < float64(c.quotaPct) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
// queueGC will queue a GC.
// Calling this function is always non-blocking.
func (c *diskCache) queueGC() {
	// triggerGC is buffered with capacity 1: if a GC signal is already
	// pending, the default case drops this one instead of blocking.
	select {
	case c.triggerGC <- struct{}{}:
	default:
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | // toClear returns how many bytes should be cleared to reach the low watermark quota.
 | 
					
						
							|  |  |  | // returns 0 if below quota.
 | 
					
						
							|  |  |  | func (c *diskCache) toClear() uint64 { | 
					
						
							|  |  |  | 	di, err := disk.GetInfo(c.dir) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir) | 
					
						
							| 
									
										
										
										
											2020-04-10 00:30:02 +08:00
										 |  |  | 		ctx := logger.SetReqInfo(GlobalContext, reqInfo) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return 0 | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-09-03 08:48:44 +08:00
										 |  |  | 	return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark)) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
var (
	// errDoneForNow is a sentinel error used to terminate a filter walk
	// early, e.g. once purge has reclaimed enough disk space.
	errDoneForNow = errors.New("done for now")
)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | func (c *diskCache) purgeWait(ctx context.Context) { | 
					
						
							|  |  |  | 	for { | 
					
						
							|  |  |  | 		select { | 
					
						
							|  |  |  | 		case <-ctx.Done(): | 
					
						
							|  |  |  | 		case <-c.triggerGC: // wait here until someone triggers.
 | 
					
						
							|  |  |  | 			c.purge(ctx) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | // Purge cache entries that were not accessed.
 | 
					
						
							| 
									
										
										
										
											2020-03-23 03:16:36 +08:00
										 |  |  | func (c *diskCache) purge(ctx context.Context) { | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	if atomic.LoadInt32(&c.purgeRunning) == 1 || c.diskUsageLow() { | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 	toFree := c.toClear() | 
					
						
							|  |  |  | 	if toFree == 0 { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	atomic.StoreInt32(&c.purgeRunning, 1) // do not run concurrent purge()
 | 
					
						
							|  |  |  | 	defer atomic.StoreInt32(&c.purgeRunning, 0) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 	// expiry for cleaning up old cache.json files that
 | 
					
						
							|  |  |  | 	// need to be cleaned up.
 | 
					
						
							|  |  |  | 	expiry := UTCNow().Add(-cacheExpiryDays) | 
					
						
							|  |  |  | 	// defaulting max hits count to 100
 | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	// ignore error we know what value we are passing.
 | 
					
						
							|  |  |  | 	scorer, _ := newFileScorer(toFree, time.Now().Unix(), 100) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	// this function returns FileInfo for cached range files and cache data file.
 | 
					
						
							|  |  |  | 	fiStatFn := func(ranges map[string]string, dataFile, pathPrefix string) map[string]os.FileInfo { | 
					
						
							|  |  |  | 		fm := make(map[string]os.FileInfo) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		fname := pathJoin(pathPrefix, dataFile) | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 		if fi, err := os.Stat(fname); err == nil { | 
					
						
							|  |  |  | 			fm[fname] = fi | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		for _, rngFile := range ranges { | 
					
						
							|  |  |  | 			fname = pathJoin(pathPrefix, rngFile) | 
					
						
							|  |  |  | 			if fi, err := os.Stat(fname); err == nil { | 
					
						
							|  |  |  | 				fm[fname] = fi | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		return fm | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 	filterFn := func(name string, typ os.FileMode) error { | 
					
						
							|  |  |  | 		if name == minioMetaBucket { | 
					
						
							|  |  |  | 			// Proceed to next file.
 | 
					
						
							|  |  |  | 			return nil | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 		cacheDir := pathJoin(c.dir, name) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		meta, _, numHits, err := c.statCachedMeta(ctx, cacheDir) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			// delete any partially filled cache entry left behind.
 | 
					
						
							|  |  |  | 			removeAll(cacheDir) | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 			// Proceed to next file.
 | 
					
						
							|  |  |  | 			return nil | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		// stat all cached file ranges and cacheDataFile.
 | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 		cachedFiles := fiStatFn(meta.Ranges, cacheDataFile, pathJoin(c.dir, name)) | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		objInfo := meta.ToObjectInfo("", "") | 
					
						
							|  |  |  | 		cc := cacheControlOpts(objInfo) | 
					
						
							|  |  |  | 		for fname, fi := range cachedFiles { | 
					
						
							|  |  |  | 			if cc != nil { | 
					
						
							|  |  |  | 				if cc.isStale(objInfo.ModTime) { | 
					
						
							|  |  |  | 					if err = removeAll(fname); err != nil { | 
					
						
							|  |  |  | 						logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					scorer.adjustSaveBytes(-fi.Size()) | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 					// break early if sufficient disk space reclaimed.
 | 
					
						
							|  |  |  | 					if c.diskUsageLow() { | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 						// if we found disk usage is already low, we return nil filtering is complete.
 | 
					
						
							|  |  |  | 						return errDoneForNow | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 				} | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 				continue | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 			scorer.addFile(fname, atime.Get(fi), fi.Size(), numHits) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		// clean up stale cache.json files for objects that never got cached but access count was maintained in cache.json
 | 
					
						
							|  |  |  | 		fi, err := os.Stat(pathJoin(cacheDir, cacheMetaJSONFile)) | 
					
						
							|  |  |  | 		if err != nil || (fi.ModTime().Before(expiry) && len(cachedFiles) == 0) { | 
					
						
							|  |  |  | 			removeAll(cacheDir) | 
					
						
							|  |  |  | 			scorer.adjustSaveBytes(-fi.Size()) | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 			// Proceed to next file.
 | 
					
						
							|  |  |  | 			return nil | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		// if we found disk usage is already low, we return nil filtering is complete.
 | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		if c.diskUsageLow() { | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 			return errDoneForNow | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		// Proceed to next file.
 | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	if err := readDirFilterFn(c.dir, filterFn); err != nil { | 
					
						
							|  |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-04-24 03:26:13 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	scorer.purgeFunc(func(qfile queuedFile) { | 
					
						
							|  |  |  | 		fileName := qfile.name | 
					
						
							|  |  |  | 		removeAll(fileName) | 
					
						
							|  |  |  | 		slashIdx := strings.LastIndex(fileName, SlashSeparator) | 
					
						
							|  |  |  | 		if slashIdx >= 0 { | 
					
						
							|  |  |  | 			fileNamePrefix := fileName[0:slashIdx] | 
					
						
							|  |  |  | 			fname := fileName[slashIdx+1:] | 
					
						
							|  |  |  | 			if fname == cacheDataFile { | 
					
						
							|  |  |  | 				removeAll(fileNamePrefix) | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2020-05-04 13:35:40 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-16 00:05:35 +08:00
										 |  |  | 	scorer.reset() | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// setOffline marks the cache drive as offline.
// The flag is stored with an atomic write so concurrent readers
// (see IsOnline) need no additional locking.
func (c *diskCache) setOffline() {
	atomic.StoreUint32(&c.online, 0)
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // returns true if cache drive is online
 | 
					
						
							|  |  |  | func (c *diskCache) IsOnline() bool { | 
					
						
							| 
									
										
										
										
											2020-05-04 13:35:40 +08:00
										 |  |  | 	return atomic.LoadUint32(&c.online) != 0 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Stat returns ObjectInfo from disk cache
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectInfo, numHits int, err error) { | 
					
						
							|  |  |  | 	var partial bool | 
					
						
							|  |  |  | 	var meta *cacheMeta | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	cacheObjPath := getCacheSHADir(c.dir, bucket, object) | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	// Stat the file to get file size.
 | 
					
						
							|  |  |  | 	meta, partial, numHits, err = c.statCachedMeta(ctx, cacheObjPath) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	if partial { | 
					
						
							|  |  |  | 		return oi, numHits, errFileNotFound | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	oi = meta.ToObjectInfo("", "") | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	oi.Bucket = bucket | 
					
						
							|  |  |  | 	oi.Name = object | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if err = decryptCacheObjectETag(&oi); err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	return | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | // statCachedMeta returns metadata from cache - including ranges cached, partial to indicate
 | 
					
						
							|  |  |  | // if partial object is cached.
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) { | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	cLock := c.NewNSLockFn(ctx, cacheObjPath) | 
					
						
							| 
									
										
										
										
											2020-08-18 02:29:58 +08:00
										 |  |  | 	if err = cLock.GetRLock(globalOperationTimeout); err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	defer cLock.RUnlock() | 
					
						
							|  |  |  | 	return c.statCache(ctx, cacheObjPath) | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // statRange returns ObjectInfo and RangeInfo from disk cache
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | func (c *diskCache) statRange(ctx context.Context, bucket, object string, rs *HTTPRangeSpec) (oi ObjectInfo, rngInfo RangeInfo, numHits int, err error) { | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	// Stat the file to get file size.
 | 
					
						
							|  |  |  | 	cacheObjPath := getCacheSHADir(c.dir, bucket, object) | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	var meta *cacheMeta | 
					
						
							|  |  |  | 	var partial bool | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	meta, partial, numHits, err = c.statCachedMeta(ctx, cacheObjPath) | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	oi = meta.ToObjectInfo("", "") | 
					
						
							|  |  |  | 	oi.Bucket = bucket | 
					
						
							|  |  |  | 	oi.Name = object | 
					
						
							|  |  |  | 	if !partial { | 
					
						
							|  |  |  | 		err = decryptCacheObjectETag(&oi) | 
					
						
							|  |  |  | 		return | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	actualSize := uint64(meta.Stat.Size) | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	var length int64 | 
					
						
							|  |  |  | 	_, length, err = rs.GetOffsetLength(int64(actualSize)) | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	actualRngSize := uint64(length) | 
					
						
							|  |  |  | 	if globalCacheKMS != nil { | 
					
						
							|  |  |  | 		actualRngSize, _ = sio.EncryptedSize(uint64(length)) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	rng := rs.String(int64(actualSize)) | 
					
						
							|  |  |  | 	rngFile, ok := meta.Ranges[rng] | 
					
						
							|  |  |  | 	if !ok { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return oi, rngInfo, numHits, ObjectNotFound{Bucket: bucket, Object: object} | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-02-23 21:33:39 +08:00
										 |  |  | 	if _, err = os.Stat(pathJoin(cacheObjPath, rngFile)); err != nil { | 
					
						
							|  |  |  | 		return oi, rngInfo, numHits, ObjectNotFound{Bucket: bucket, Object: object} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	rngInfo = RangeInfo{Range: rng, File: rngFile, Size: int64(actualRngSize)} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	err = decryptCacheObjectETag(&oi) | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	return | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // statCache is a convenience function for purge() to get ObjectInfo for cached object
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) { | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	// Stat the file to get file size.
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	metaPath := pathJoin(cacheObjPath, cacheMetaJSONFile) | 
					
						
							|  |  |  | 	f, err := os.Open(metaPath) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return meta, partial, 0, err | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	defer f.Close() | 
					
						
							|  |  |  | 	meta = &cacheMeta{Version: cacheMetaVersion} | 
					
						
							|  |  |  | 	if err := jsonLoad(f, meta); err != nil { | 
					
						
							|  |  |  | 		return meta, partial, 0, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// get metadata of part.1 if full file has been cached.
 | 
					
						
							|  |  |  | 	partial = true | 
					
						
							|  |  |  | 	fi, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)) | 
					
						
							|  |  |  | 	if err == nil { | 
					
						
							|  |  |  | 		meta.Stat.ModTime = atime.Get(fi) | 
					
						
							|  |  |  | 		partial = false | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	return meta, partial, meta.Hits, nil | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// SaveMetadata saves object metadata to the disk cache, serializing access
// to the cache entry with an exclusive namespace lock before delegating to
// saveMetadata.
// incHitsOnly is true if the metadata update is incrementing only the hit counter.
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(ctx, cachedPath)
	if err := cLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
	defer cLock.Unlock()
	return c.saveMetadata(ctx, bucket, object, meta, actualSize, rs, rsFileName, incHitsOnly)
}
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
// saveMetadata saves object metadata to disk cache by reading, updating and
// rewriting the entry's cache.json. Callers must hold the entry's namespace
// lock (see SaveMetadata).
// incHitsOnly is true if metadata update is incrementing only the hit counter.
func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
	// Create cache directory if needed
	if err := os.MkdirAll(cachedPath, 0777); err != nil {
		return err
	}
	// Open read-write without O_TRUNC: the existing JSON is loaded first,
	// then the same handle is reused to persist the updated metadata.
	f, err := os.OpenFile(metaPath, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer f.Close()

	// io.EOF means a freshly created (empty) cache.json — start from the
	// zero-value metadata in that case.
	m := &cacheMeta{Version: cacheMetaVersion}
	if err := jsonLoad(f, m); err != nil && err != io.EOF {
		return err
	}
	// increment hits
	if rs != nil {
		// rsFileName gets set by putRange. Check for blank values here
		// coming from other code paths that set rs only (eg initial creation or hit increment).
		if rsFileName != "" {
			if m.Ranges == nil {
				m.Ranges = make(map[string]string)
			}
			m.Ranges[rs.String(actualSize)] = rsFileName
		}
	} else {
		// this is necessary cleanup of range files if entire object is cached.
		for _, f := range m.Ranges {
			removeAll(pathJoin(cachedPath, f))
		}
		m.Ranges = nil
	}
	m.Stat.Size = actualSize
	m.Stat.ModTime = UTCNow()
	if !incHitsOnly {
		// reset meta
		m.Meta = meta
	} else {
		// Hit-counter-only update: preserve existing metadata, but refresh
		// the etag when the caller supplied one.
		if m.Meta == nil {
			m.Meta = make(map[string]string)
		}
		if etag, ok := meta["etag"]; ok {
			m.Meta["etag"] = etag
		}
	}

	m.Hits++

	m.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
	return jsonSave(f, m)
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func getCacheSHADir(dir, bucket, object string) string { | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	return pathJoin(dir, getSHA256Hash([]byte(pathJoin(bucket, object)))) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// Cache data to disk with bitrot checksum added for each block of 1MB.
// Each block is written as <hash><data>, where <hash> is the HighwayHash256S
// checksum of the data that follows. The returned count covers only data
// bytes written, not the interleaved checksum bytes.
func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Reader, size uint64) (int64, error) {
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return 0, err
	}
	filePath := pathJoin(cachePath, fileName)

	if filePath == "" || reader == nil {
		return 0, errInvalidArgument
	}

	if err := checkPathLength(filePath); err != nil {
		return 0, err
	}
	f, err := os.Create(filePath)
	if err != nil {
		return 0, osErrToFileErr(err)
	}
	defer f.Close()

	var bytesWritten int64

	h := HighwayHash256S.New()

	// Reuse a scratch buffer from the pool; its length fixes the block size.
	bufp := c.pool.Get().(*[]byte)
	defer c.pool.Put(bufp)

	var n, n2 int
	for {
		// ReadFull fills a whole block; io.EOF / io.ErrUnexpectedEOF mark
		// the (possibly short) final block and are not failures.
		n, err = io.ReadFull(reader, *bufp)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return 0, err
		}
		eof := err == io.EOF || err == io.ErrUnexpectedEOF
		// For a zero-size object (size == 0) an empty block is still
		// written below so the file carries a checksum.
		if n == 0 && size != 0 {
			// Reached EOF, nothing more to be done.
			break
		}
		h.Reset()
		if _, err = h.Write((*bufp)[:n]); err != nil {
			return 0, err
		}
		hashBytes := h.Sum(nil)
		// Checksum first, then the block data it covers.
		if _, err = f.Write(hashBytes); err != nil {
			return 0, err
		}
		if n2, err = f.Write((*bufp)[:n]); err != nil {
			return 0, err
		}
		bytesWritten += int64(n2)
		if eof {
			break
		}
	}
	return bytesWritten, nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | func newCacheEncryptReader(content io.Reader, bucket, object string, metadata map[string]string) (r io.Reader, err error) { | 
					
						
							|  |  |  | 	objectEncryptionKey, err := newCacheEncryptMetadata(bucket, object, metadata) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return nil, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20}) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return nil, crypto.ErrInvalidCustomerKey | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return reader, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | func newCacheEncryptMetadata(bucket, object string, metadata map[string]string) ([]byte, error) { | 
					
						
							|  |  |  | 	var sealedKey crypto.SealedKey | 
					
						
							|  |  |  | 	if globalCacheKMS == nil { | 
					
						
							|  |  |  | 		return nil, errKMSNotConfigured | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-07-09 09:50:43 +08:00
										 |  |  | 	key, encKey, err := globalCacheKMS.GenerateKey(globalCacheKMS.DefaultKeyID(), crypto.Context{bucket: pathJoin(bucket, object)}) | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return nil, err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	objectKey := crypto.GenerateKey(key, rand.Reader) | 
					
						
							|  |  |  | 	sealedKey = objectKey.Seal(key, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object) | 
					
						
							| 
									
										
										
										
											2020-07-09 09:50:43 +08:00
										 |  |  | 	crypto.S3.CreateMetadata(metadata, globalCacheKMS.DefaultKeyID(), encKey, sealedKey) | 
					
						
							| 
									
										
										
										
											2019-08-24 01:13:22 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if etag, ok := metadata["etag"]; ok { | 
					
						
							|  |  |  | 		metadata["etag"] = hex.EncodeToString(objectKey.SealETag([]byte(etag))) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	metadata[SSECacheEncrypted] = "" | 
					
						
							|  |  |  | 	return objectKey[:], nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
// Caches the object to disk
//
// Admission control: when c.after >= 1 the object is only written to disk
// once it has accumulated c.after hits; until then only its metadata (hit
// counter) is persisted via saveMetadata. Range requests are delegated to
// putRange. The whole operation runs under the per-object namespace write
// lock derived from the cache path.
func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly bool) error {
	if !c.diskSpaceAvailable(size) {
		// Drain the body so the caller's upstream read completes; the copy
		// error is deliberately ignored — we are already failing with disk full.
		io.Copy(ioutil.Discard, data)
		return errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(ctx, cachePath)
	if err := cLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
	defer cLock.Unlock()

	meta, _, numHits, err := c.statCache(ctx, cachePath)
	// Case where object not yet cached
	if os.IsNotExist(err) && c.after >= 1 {
		// First sighting: record metadata only (hit counter starts here).
		return c.saveMetadata(ctx, bucket, object, opts.UserDefined, size, nil, "", false)
	}
	// Case where object already has a cache metadata entry but not yet cached
	if err == nil && numHits < c.after {
		cETag := extractETag(meta.Meta)
		bETag := extractETag(opts.UserDefined)
		if cETag == bETag {
			// Same object version as the pending entry: just bump the counter.
			return c.saveMetadata(ctx, bucket, object, opts.UserDefined, size, nil, "", false)
		}
		// ETag changed upstream: fall through and cache the new content, but
		// only increment hits on the metadata we save below.
		incHitsOnly = true
	}

	if rs != nil {
		// Range GETs are cached as separate range files.
		return c.putRange(ctx, bucket, object, data, size, rs, opts)
	}
	// Re-check free space under the lock before committing to the write.
	if !c.diskSpaceAvailable(size) {
		return errDiskFull
	}
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return err
	}
	var metadata = cloneMSS(opts.UserDefined)
	var reader = data
	var actualSize = uint64(size)
	if globalCacheKMS != nil {
		// Encrypt cached content at rest; newCacheEncryptReader also adds the
		// SSE bookkeeping entries into metadata.
		reader, err = newCacheEncryptReader(data, bucket, object, metadata)
		if err != nil {
			return err
		}
		// On-disk size grows by the sio encryption overhead.
		actualSize, _ = sio.EncryptedSize(uint64(size))
	}
	n, err := c.bitrotWriteToCache(cachePath, cacheDataFile, reader, actualSize)
	if IsErr(err, baseErrs...) {
		// take the cache drive offline
		c.setOffline()
	}
	if err != nil {
		// Remove any partially written cache entry.
		removeAll(cachePath)
		return err
	}

	if actualSize != uint64(n) {
		// Short write: discard the entry rather than serve truncated data.
		removeAll(cachePath)
		return IncompleteBody{Bucket: bucket, Object: object}
	}
	return c.saveMetadata(ctx, bucket, object, metadata, n, nil, "", incHitsOnly)
}
					
						
							|  |  |  | 
 | 
					
						
// Caches the range to disk
//
// The range content is stored in a uniquely named file (UUID) inside the
// object's cache directory, and saveMetadata records the range spec so
// later range GETs can be served from it. Locking is assumed to be held
// by the caller (Put).
func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions) error {
	// rlen is the byte length of the requested range within the full object.
	rlen, err := rs.GetLength(size)
	if err != nil {
		return err
	}
	if !c.diskSpaceAvailable(rlen) {
		return errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	if err := os.MkdirAll(cachePath, 0777); err != nil {
		return err
	}
	var metadata = cloneMSS(opts.UserDefined)
	var reader = data
	var actualSize = uint64(rlen)
	// objSize is the actual size of object (with encryption overhead if any)
	var objSize = uint64(size)
	if globalCacheKMS != nil {
		// Encrypt the cached range; metadata gains SSE bookkeeping entries.
		reader, err = newCacheEncryptReader(data, bucket, object, metadata)
		if err != nil {
			return err
		}
		actualSize, _ = sio.EncryptedSize(uint64(rlen))
		objSize, _ = sio.EncryptedSize(uint64(size))

	}
	// Each cached range lives in its own uniquely-named file.
	cacheFile := MustGetUUID()
	n, err := c.bitrotWriteToCache(cachePath, cacheFile, reader, actualSize)
	if IsErr(err, baseErrs...) {
		// take the cache drive offline
		c.setOffline()
	}
	if err != nil {
		removeAll(cachePath)
		return err
	}
	if actualSize != uint64(n) {
		// Short write: drop the entry rather than serve truncated data.
		removeAll(cachePath)
		return IncompleteBody{Bucket: bucket, Object: object}
	}
	return c.saveMetadata(ctx, bucket, object, metadata, int64(objSize), rs, cacheFile, false)
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // checks streaming bitrot checksum of cached object before returning data
 | 
					
						
							|  |  |  | func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, offset, length int64, writer io.Writer) error { | 
					
						
							|  |  |  | 	h := HighwayHash256S.New() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	checksumHash := make([]byte, h.Size()) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	startBlock := offset / cacheBlkSize | 
					
						
							|  |  |  | 	endBlock := (offset + length) / cacheBlkSize | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// get block start offset
 | 
					
						
							|  |  |  | 	var blockStartOffset int64 | 
					
						
							|  |  |  | 	if startBlock > 0 { | 
					
						
							|  |  |  | 		blockStartOffset = (cacheBlkSize + int64(h.Size())) * startBlock | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	tillLength := (cacheBlkSize + int64(h.Size())) * (endBlock - startBlock + 1) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Start offset cannot be negative.
 | 
					
						
							|  |  |  | 	if offset < 0 { | 
					
						
							|  |  |  | 		logger.LogIf(ctx, errUnexpected) | 
					
						
							|  |  |  | 		return errUnexpected | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Writer cannot be nil.
 | 
					
						
							|  |  |  | 	if writer == nil { | 
					
						
							|  |  |  | 		logger.LogIf(ctx, errUnexpected) | 
					
						
							|  |  |  | 		return errUnexpected | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	var blockOffset, blockLength int64 | 
					
						
							|  |  |  | 	rc, err := readCacheFileStream(filePath, blockStartOffset, tillLength) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	bufp := c.pool.Get().(*[]byte) | 
					
						
							|  |  |  | 	defer c.pool.Put(bufp) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	for block := startBlock; block <= endBlock; block++ { | 
					
						
							|  |  |  | 		switch { | 
					
						
							|  |  |  | 		case startBlock == endBlock: | 
					
						
							|  |  |  | 			blockOffset = offset % cacheBlkSize | 
					
						
							|  |  |  | 			blockLength = length | 
					
						
							|  |  |  | 		case block == startBlock: | 
					
						
							|  |  |  | 			blockOffset = offset % cacheBlkSize | 
					
						
							|  |  |  | 			blockLength = cacheBlkSize - blockOffset | 
					
						
							|  |  |  | 		case block == endBlock: | 
					
						
							|  |  |  | 			blockOffset = 0 | 
					
						
							|  |  |  | 			blockLength = (offset + length) % cacheBlkSize | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 			blockOffset = 0 | 
					
						
							|  |  |  | 			blockLength = cacheBlkSize | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		if blockLength == 0 { | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		if _, err := io.ReadFull(rc, checksumHash); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		h.Reset() | 
					
						
							|  |  |  | 		n, err := io.ReadFull(rc, *bufp) | 
					
						
							|  |  |  | 		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { | 
					
						
							|  |  |  | 			logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		eof := err == io.EOF || err == io.ErrUnexpectedEOF | 
					
						
							|  |  |  | 		if n == 0 && length != 0 { | 
					
						
							|  |  |  | 			// Reached EOF, nothing more to be done.
 | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		if _, e := h.Write((*bufp)[:n]); e != nil { | 
					
						
							|  |  |  | 			return e | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		hashBytes := h.Sum(nil) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		if !bytes.Equal(hashBytes, checksumHash) { | 
					
						
							| 
									
										
										
										
											2019-10-02 04:12:15 +08:00
										 |  |  | 			err = fmt.Errorf("hashes do not match expected %s, got %s", | 
					
						
							|  |  |  | 				hex.EncodeToString(checksumHash), hex.EncodeToString(hashBytes)) | 
					
						
							| 
									
										
										
										
											2020-04-10 00:30:02 +08:00
										 |  |  | 			logger.LogIf(GlobalContext, err) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		if _, err := io.Copy(writer, bytes.NewReader((*bufp)[blockOffset:blockOffset+blockLength])); err != nil { | 
					
						
							|  |  |  | 			if err != io.ErrClosedPipe { | 
					
						
							|  |  |  | 				logger.LogIf(ctx, err) | 
					
						
							| 
									
										
										
										
											2019-10-23 06:04:25 +08:00
										 |  |  | 				return err | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2019-10-23 06:04:25 +08:00
										 |  |  | 			eof = true | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		if eof { | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// Get returns ObjectInfo and reader for object from disk cache
//
// Takes the per-object namespace read lock; on the success paths the lock
// release is tied to the returned reader's Close (via nsUnlocker /
// pipeCloser), so callers MUST close the returned GetObjectReader.
// Cached range files are served when the exact range was previously cached.
func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(ctx, cacheObjPath)
	if err := cLock.GetRLock(globalOperationTimeout); err != nil {
		return nil, numHits, err
	}

	defer cLock.RUnlock()
	var objInfo ObjectInfo
	var rngInfo RangeInfo
	if objInfo, rngInfo, numHits, err = c.statRange(ctx, bucket, object, rs); err != nil {
		return nil, numHits, toObjectErr(err, bucket, object)
	}
	cacheFile := cacheDataFile
	objSize := objInfo.Size
	if !rngInfo.Empty() {
		// for cached ranges, need to pass actual range file size to GetObjectReader
		// and clear out range spec
		cacheFile = rngInfo.File
		objInfo.Size = rngInfo.Size
		rs = nil
	}
	var nsUnlocker = func() {}
	// For a directory, we need to send a reader that returns no bytes.
	if HasSuffix(object, SlashSeparator) {
		// The lock taken above is released when
		// objReader.Close() is called by the caller.
		gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts, nsUnlocker)
		return gr, numHits, gerr
	}

	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts, nsUnlocker)
	if nErr != nil {
		return nil, numHits, nErr
	}
	filePath := pathJoin(cacheObjPath, cacheFile)
	// Stream verified cache content through a pipe: the goroutine below
	// performs the bitrot-checked read and the caller consumes pr.
	pr, pw := io.Pipe()
	go func() {
		err := c.bitrotReadFromCache(ctx, filePath, off, length, pw)
		if err != nil {
			// Verification failed — purge the corrupt cache entry.
			removeAll(cacheObjPath)
		}
		pw.CloseWithError(err)
	}()
	// Cleanup function to cause the go routine above to exit, in
	// case of incomplete read.
	pipeCloser := func() { pr.Close() }

	gr, gerr := fn(pr, h, opts.CheckPrecondFn, pipeCloser)
	if gerr != nil {
		return gr, numHits, gerr
	}
	if globalCacheKMS != nil {
		// clean up internal SSE cache metadata
		delete(gr.ObjInfo.UserDefined, crypto.SSEHeader)
	}
	if !rngInfo.Empty() {
		// overlay Size with actual object size and not the range size
		gr.ObjInfo.Size = objSize
	}
	return gr, numHits, nil

}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Deletes the cached object
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) { | 
					
						
							|  |  |  | 	cLock := c.NewNSLockFn(ctx, cacheObjPath) | 
					
						
							| 
									
										
										
										
											2020-08-18 02:29:58 +08:00
										 |  |  | 	if err := cLock.GetLock(globalOperationTimeout); err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	defer cLock.Unlock() | 
					
						
							|  |  |  | 	return removeAll(cacheObjPath) | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | // Deletes the cached object
 | 
					
						
							|  |  |  | func (c *diskCache) Delete(ctx context.Context, bucket, object string) (err error) { | 
					
						
							|  |  |  | 	cacheObjPath := getCacheSHADir(c.dir, bucket, object) | 
					
						
							|  |  |  | 	return c.delete(ctx, cacheObjPath) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // convenience function to check if object is cached on this diskCache
 | 
					
						
							|  |  |  | func (c *diskCache) Exists(ctx context.Context, bucket, object string) bool { | 
					
						
							|  |  |  | 	if _, err := os.Stat(getCacheSHADir(c.dir, bucket, object)); err != nil { | 
					
						
							|  |  |  | 		return false | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return true | 
					
						
							|  |  |  | } |