// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/rand"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/djherbis/atime"
	"github.com/minio/minio/internal/amztime"
	"github.com/minio/minio/internal/config/cache"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/disk"
	"github.com/minio/minio/internal/fips"
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/kms"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/sio"
)

const (
	// cache.json object metadata for cached objects.
	cacheMetaJSONFile   = "cache.json"
	cacheDataFile       = "part.1"
	cacheDataFilePrefix = "part"

	cacheMetaVersion = "1.0.0"
	cacheExpiryDays  = 90 * time.Hour * 24 // defaults to 90 days

	// SSECacheEncrypted is the metadata key indicating that the object
	// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
	SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"
	cacheMultipartDir = "multipart"
	cacheWritebackDir = "writeback"

	cacheStaleUploadCleanupInterval = time.Hour * 24
	cacheStaleUploadExpiry          = time.Hour * 24
	cacheWBStaleUploadExpiry        = time.Hour * 24 * 7
)

// CacheChecksumInfoV1 - carries checksums of individual blocks on disk.
type CacheChecksumInfoV1 struct {
	Algorithm string `json:"algorithm"`
	Blocksize int64  `json:"blocksize"`
}

// Represents the cache metadata struct
type cacheMeta struct {
	Version string   `json:"version"`
	Stat    StatInfo `json:"stat"` // Stat of the current object `cache.json`.

	// checksums of blocks on disk.
	Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"`
	// Metadata map for current object.
	Meta map[string]string `json:"meta,omitempty"`
	// Ranges maps cached range to associated filename.
	Ranges map[string]string `json:"ranges,omitempty"`
	// Hits is a counter on the number of times this object has been accessed so far.
	Hits   int    `json:"hits,omitempty"`
	Bucket string `json:"bucket,omitempty"`
	Object string `json:"object,omitempty"`
	// for multipart upload
	PartNumbers     []int    `json:"partNums,omitempty"`   // Part Numbers
	PartETags       []string `json:"partETags,omitempty"`  // Part ETags
	PartSizes       []int64  `json:"partSizes,omitempty"`  // Part Sizes
	PartActualSizes []int64  `json:"partASizes,omitempty"` // Part ActualSizes (compression)
}
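
// For orientation, a serialized cache.json built from the struct above could
// look roughly like the following (a hedged sketch with illustrative values;
// the exact shape of "stat" depends on StatInfo, which is defined elsewhere
// in this package):
//
//	{
//	  "version": "1.0.0",
//	  "stat": {"size": 1048576, "modTime": "2021-04-18T19:41:13Z"},
//	  "checksum": {"algorithm": "highwayhash256", "blocksize": 1048576},
//	  "meta": {"content-type": "application/octet-stream"},
//	  "ranges": {"bytes=0-1023": "0738f194..."},
//	  "hits": 2,
//	  "bucket": "mybucket",
//	  "object": "photos/2021/beach.jpg"
//	}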

// RangeInfo has the range, file and range length information for a cached range.
type RangeInfo struct {
	Range string
	File  string
	Size  int64
}

// Empty returns true if this is an empty struct
func (r *RangeInfo) Empty() bool {
	return r.Range == "" && r.File == "" && r.Size == 0
}

func (m *cacheMeta) ToObjectInfo() (o ObjectInfo) {
	if len(m.Meta) == 0 {
		m.Meta = make(map[string]string)
		m.Stat.ModTime = timeSentinel
	}

	o = ObjectInfo{
		Bucket:            m.Bucket,
		Name:              m.Object,
		CacheStatus:       CacheHit,
		CacheLookupStatus: CacheHit,
	}
	meta := cloneMSS(m.Meta)
	// We set file info only if it's valid.
	o.Size = m.Stat.Size
	o.ETag = extractETag(meta)
	o.ContentType = meta["content-type"]
	o.ContentEncoding = meta["content-encoding"]
	if storageClass, ok := meta[xhttp.AmzStorageClass]; ok {
		o.StorageClass = storageClass
	} else {
		o.StorageClass = globalMinioDefaultStorageClass
	}

	if exp, ok := meta["expires"]; ok {
		if t, e := amztime.ParseHeader(exp); e == nil {
			o.Expires = t.UTC()
		}
	}
	if mtime, ok := meta["last-modified"]; ok {
		if t, e := amztime.ParseHeader(mtime); e == nil {
			o.ModTime = t.UTC()
		}
	}
	o.Parts = make([]ObjectPartInfo, len(m.PartNumbers))
	for i := range m.PartNumbers {
		o.Parts[i].Number = m.PartNumbers[i]
		o.Parts[i].Size = m.PartSizes[i]
		o.Parts[i].ETag = m.PartETags[i]
		o.Parts[i].ActualSize = m.PartActualSizes[i]
	}
	// etag/md5Sum has already been extracted; remove it here so it does not
	// reappear as part of the user-defined metadata.
	o.UserDefined = cleanMetadata(meta)
	return o
}

// represents disk cache struct
type diskCache struct {
	// is set to 0 if drive is offline
	online       uint32 // ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
	purgeRunning int32

	triggerGC          chan struct{}
	dir                string         // caching directory
	stats              CacheDiskStats // disk cache stats for prometheus
	quotaPct           int            // max usage in %
	pool               sync.Pool
	after              int // minimum accesses before an object is cached.
	lowWatermark       int
	highWatermark      int
	enableRange        bool
	commitWriteback    bool
	commitWritethrough bool

	retryWritebackCh chan ObjectInfo
	// nsMutex namespace lock
	nsMutex *nsLockMap
	// Object functions pointing to the corresponding functions of backend implementation.
	NewNSLockFn func(cachePath string) RWLocker
}

// Inits the disk cache dir if it is not initialized already.
func newDiskCache(ctx context.Context, dir string, config cache.Config) (*diskCache, error) {
	quotaPct := config.MaxUse
	if quotaPct == 0 {
		quotaPct = config.Quota
	}

	if err := os.MkdirAll(dir, 0o777); err != nil {
		return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err)
	}
	cache := diskCache{
		dir:                dir,
		triggerGC:          make(chan struct{}, 1),
		stats:              CacheDiskStats{Dir: dir},
		quotaPct:           quotaPct,
		after:              config.After,
		lowWatermark:       config.WatermarkLow,
		highWatermark:      config.WatermarkHigh,
		enableRange:        config.Range,
		commitWriteback:    config.CacheCommitMode == CommitWriteBack,
		commitWritethrough: config.CacheCommitMode == CommitWriteThrough,

		retryWritebackCh: make(chan ObjectInfo, 10000),
		online:           1,
		pool: sync.Pool{
			New: func() interface{} {
				b := disk.AlignedBlock(int(cacheBlkSize))
				return &b
			},
		},
		nsMutex: newNSLock(false),
	}
	go cache.purgeWait(ctx)
	go cache.cleanupStaleUploads(ctx)
	if cache.commitWriteback {
		go cache.scanCacheWritebackFailures(ctx)
	}
	cache.diskSpaceAvailable(0) // update if cache usage is already high.
	cache.NewNSLockFn = func(cachePath string) RWLocker {
		return cache.nsMutex.NewNSLock(nil, cachePath, "")
	}
	return &cache, nil
}
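
// The sync.Pool above recycles pointers to disk-aligned buffers so the hot
// read/write paths do not re-allocate them per call. A minimal consumption
// sketch (hedged; the actual call sites live in the cache I/O paths of this
// file):
//
//	bufp := c.pool.Get().(*[]byte)
//	defer c.pool.Put(bufp)
//	n, err := io.ReadFull(reader, *bufp)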

// diskUsageLow() returns true if disk usage falls below the low watermark w.r.t configured cache quota.
// Ex. for a 100GB disk, if quota is configured as 70% and watermark_low = 80% and
// watermark_high = 90% then garbage collection starts when 63% of disk is used and
// stops when disk usage drops to 56%
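// Those two thresholds follow from the same integer arithmetic used below and
// in diskSpaceAvailable, e.g. for the example values above:
//
//	gcTriggerPct := 70 * 90 / 100 // = 63, GC starts at 63% disk usage
//	gcStopPct    := 70 * 80 / 100 // = 56, GC stops at 56% disk usage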
func (c *diskCache) diskUsageLow() bool {
	gcStopPct := c.quotaPct * c.lowWatermark / 100
	di, err := disk.GetInfo(c.dir, false)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
		ctx := logger.SetReqInfo(GlobalContext, reqInfo)
		logger.LogIf(ctx, err)
		return false
	}
	usedPercent := float64(di.Used) * 100 / float64(di.Total)
	low := int(usedPercent) < gcStopPct
	atomic.StoreUint64(&c.stats.UsagePercent, uint64(usedPercent))
	if low {
		atomic.StoreInt32(&c.stats.UsageState, 0)
	}
	return low
}

// Returns true if disk usage stays below the configured cache quota once size
// is added. If current usage (without size) already exceeds the high
// watermark, a GC is automatically queued.
func (c *diskCache) diskSpaceAvailable(size int64) bool {
	reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
	ctx := logger.SetReqInfo(GlobalContext, reqInfo)

	gcTriggerPct := c.quotaPct * c.highWatermark / 100
	di, err := disk.GetInfo(c.dir, false)
	if err != nil {
		logger.LogIf(ctx, err)
		return false
	}
	if di.Total == 0 {
		logger.LogIf(ctx, errors.New("diskCache: Received 0 total disk size"))
		return false
	}
	usedPercent := float64(di.Used) * 100 / float64(di.Total)
	if usedPercent >= float64(gcTriggerPct) {
		atomic.StoreInt32(&c.stats.UsageState, 1)
		c.queueGC()
	}
	atomic.StoreUint64(&c.stats.UsagePercent, uint64(usedPercent))

	// Recalculate percentage with provided size added.
	usedPercent = float64(di.Used+uint64(size)) * 100 / float64(di.Total)

	return usedPercent < float64(c.quotaPct)
}
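
// Worked example for diskSpaceAvailable with illustrative numbers: quotaPct=70
// on a 100GB drive with 65GB used gives usedPercent=65. Adding a 6GB object
// recomputes it as 71 >= 70, so the call returns false and the object is not
// cached; 65 also already exceeds gcTriggerPct (63 with highWatermark=90), so
// a GC has been queued via queueGC.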

// queueGC will queue a GC.
// Calling this function is always non-blocking.
func (c *diskCache) queueGC() {
	select {
	case c.triggerGC <- struct{}{}:
	default:
	}
}
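
// Because triggerGC is created with capacity 1 in newDiskCache, the
// select/default above coalesces concurrent callers: at most one GC request is
// ever pending and purgeWait drains it. The same pattern in isolation:
//
//	trigger := make(chan struct{}, 1)
//	select {
//	case trigger <- struct{}{}: // GC request queued
//	default: // a request is already pending; drop this one
//	}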

// toClear returns how many bytes should be cleared to reach the low watermark quota.
// returns 0 if below quota.
func (c *diskCache) toClear() uint64 {
	di, err := disk.GetInfo(c.dir, false)
	if err != nil {
		reqInfo := (&logger.ReqInfo{}).AppendTags("cachePath", c.dir)
		ctx := logger.SetReqInfo(GlobalContext, reqInfo)
		logger.LogIf(ctx, err)
		return 0
	}
	return bytesToClear(int64(di.Total), int64(di.Free), uint64(c.quotaPct), uint64(c.lowWatermark), uint64(c.highWatermark))
}

func (c *diskCache) purgeWait(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
		case <-c.triggerGC: // wait here until someone triggers.
			c.purge(ctx)
		}
	}
}

// Purge cache entries that were not accessed.
func (c *diskCache) purge(ctx context.Context) {
	if atomic.LoadInt32(&c.purgeRunning) == 1 || c.diskUsageLow() {
		return
	}

	toFree := c.toClear()
	if toFree == 0 {
		return
	}

	atomic.StoreInt32(&c.purgeRunning, 1) // do not run concurrent purge()
	defer atomic.StoreInt32(&c.purgeRunning, 0)

	// expiry threshold for cleaning up old cache.json files.
	expiry := UTCNow().Add(-cacheExpiryDays)
	// defaulting max hits count to 100
	// ignore error, we know what value we are passing.
	scorer, err := newFileScorer(toFree, time.Now().Unix(), 100)
	if err != nil {
		logger.LogIf(ctx, err)
		return
	}

	// this function returns FileInfo for cached range files.
	fiStatRangesFn := func(ranges map[string]string, pathPrefix string) map[string]os.FileInfo {
		fm := make(map[string]os.FileInfo)
		for _, rngFile := range ranges {
			fname := pathJoin(pathPrefix, rngFile)
			if fi, err := os.Stat(fname); err == nil {
				fm[fname] = fi
			}
		}
		return fm
	}

	// this function returns the most recent atime among cached part files.
	lastAtimeFn := func(partNums []int, pathPrefix string) time.Time {
		lastATime := timeSentinel
		for _, pnum := range partNums {
			fname := pathJoin(pathPrefix, fmt.Sprintf("%s.%d", cacheDataFilePrefix, pnum))
			if fi, err := os.Stat(fname); err == nil {
				if atime.Get(fi).After(lastATime) {
					lastATime = atime.Get(fi)
				}
			}
		}
		if len(partNums) == 0 {
			fname := pathJoin(pathPrefix, cacheDataFile)
			if fi, err := os.Stat(fname); err == nil {
				lastATime = atime.Get(fi)
			}
		}
		return lastATime
	}

	filterFn := func(name string, typ os.FileMode) error {
		if name == minioMetaBucket {
			// Proceed to next file.
			return nil
		}

		cacheDir := pathJoin(c.dir, name)
		meta, _, numHits, err := c.statCachedMeta(ctx, cacheDir)
		if err != nil {
			// delete any partially filled cache entry left behind.
			removeAll(cacheDir)
			// Proceed to next file.
			return nil
		}
		// get last access time of cache part files
		lastAtime := lastAtimeFn(meta.PartNumbers, pathJoin(c.dir, name))
		// stat all cached file ranges.
		cachedRngFiles := fiStatRangesFn(meta.Ranges, pathJoin(c.dir, name))
		objInfo := meta.ToObjectInfo()
		// prevent gc from clearing un-synced commits. This metadata is present when
		// cache writeback commit setting is enabled.
		status, ok := objInfo.UserDefined[writeBackStatusHeader]
		if ok && status != CommitComplete.String() {
			return nil
		}
		cc := cacheControlOpts(objInfo)
		switch {
		case cc != nil:
			if cc.isStale(objInfo.ModTime) {
				removeAll(cacheDir)
				scorer.adjustSaveBytes(-objInfo.Size)
				// break early if sufficient disk space reclaimed.
				if c.diskUsageLow() {
					// disk usage is already low; return errDoneForNow to stop the walk, filtering is complete.
					return errDoneForNow
				}
			}
		case lastAtime != timeSentinel:
			// cached multipart or single part
			objInfo.AccTime = lastAtime
			objInfo.Name = pathJoin(c.dir, name, cacheDataFile)
			scorer.addFileWithObjInfo(objInfo, numHits)
		}

		for fname, fi := range cachedRngFiles {
			if fi == nil {
				continue
			}
			if cc != nil {
				if cc.isStale(objInfo.ModTime) {
					removeAll(fname)
					scorer.adjustSaveBytes(-fi.Size())

					// break early if sufficient disk space reclaimed.
					if c.diskUsageLow() {
						// disk usage is already low; return errDoneForNow to stop the walk, filtering is complete.
						return errDoneForNow
					}
				}
				continue
			}
			scorer.addFile(fname, atime.Get(fi), fi.Size(), numHits)
		}
		// clean up stale cache.json files for objects that never got cached but access count was maintained in cache.json
		fi, err := os.Stat(pathJoin(cacheDir, cacheMetaJSONFile))
		if err != nil || (fi != nil && fi.ModTime().Before(expiry) && len(cachedRngFiles) == 0) {
			removeAll(cacheDir)
			if fi != nil {
				scorer.adjustSaveBytes(-fi.Size())
			}
			// Proceed to next file.
			return nil
		}

		// disk usage is already low; stop the walk, filtering is complete.
		if c.diskUsageLow() {
			return errDoneForNow
		}

		// Proceed to next file.
		return nil
	}

	if err := readDirFn(c.dir, filterFn); err != nil {
		logger.LogIf(ctx, err)
		return
	}

	scorer.purgeFunc(func(qfile queuedFile) {
		fileName := qfile.name
		removeAll(fileName)
		slashIdx := strings.LastIndex(fileName, SlashSeparator)
		if slashIdx >= 0 {
			fileNamePrefix := fileName[0:slashIdx]
			fname := fileName[slashIdx+1:]
			if fname == cacheDataFile {
				removeAll(fileNamePrefix)
			}
		}
	})

	scorer.reset()
}

// sets cache drive status
func (c *diskCache) setOffline() {
	atomic.StoreUint32(&c.online, 0)
}

// returns true if cache drive is online
func (c *diskCache) IsOnline() bool {
	return atomic.LoadUint32(&c.online) != 0
}

// Stat returns ObjectInfo from disk cache
func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectInfo, numHits int, err error) {
	var partial bool
	var meta *cacheMeta

	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	// Stat the file to get file size.
	meta, partial, numHits, err = c.statCachedMeta(ctx, cacheObjPath)
	if err != nil {
		return
	}
	if partial {
		return oi, numHits, errFileNotFound
	}
	oi = meta.ToObjectInfo()
	oi.Bucket = bucket
	oi.Name = object

	if err = decryptCacheObjectETag(&oi); err != nil {
		return
	}
	return
}

// statCachedMeta returns metadata from cache - including ranges cached, partial to indicate
// if partial object is cached.
func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {
	cLock := c.NewNSLockFn(cacheObjPath)
	lkctx, err := cLock.GetRLock(ctx, globalOperationTimeout)
	if err != nil {
		return
	}
	ctx = lkctx.Context()
	defer cLock.RUnlock(lkctx)
	return c.statCache(ctx, cacheObjPath)
}
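
// The read-lock sequence above (NewNSLockFn, GetRLock with a timeout, swapping
// in lkctx.Context(), deferred RUnlock) is the locking pattern the cache
// accessors in this file follow; write paths presumably use the GetLock/Unlock
// counterparts of the RWLocker interface. Locking by cache path rather than by
// bucket/object keeps the namespace lock granular to a single cached entry.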

// statRange returns ObjectInfo and RangeInfo from disk cache
func (c *diskCache) statRange(ctx context.Context, bucket, object string, rs *HTTPRangeSpec) (oi ObjectInfo, rngInfo RangeInfo, numHits int, err error) {
	// Stat the file to get file size.
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	var meta *cacheMeta
	var partial bool

	meta, partial, numHits, err = c.statCachedMeta(ctx, cacheObjPath)
	if err != nil {
		return
	}

	oi = meta.ToObjectInfo()
	oi.Bucket = bucket
	oi.Name = object
	if !partial {
		err = decryptCacheObjectETag(&oi)
		return
	}

	actualSize := uint64(meta.Stat.Size)
	var length int64
	_, length, err = rs.GetOffsetLength(int64(actualSize))
	if err != nil {
		return
	}

	actualRngSize := uint64(length)
	if globalCacheKMS != nil {
		actualRngSize, _ = sio.EncryptedSize(uint64(length))
	}

	rng := rs.String(int64(actualSize))
	rngFile, ok := meta.Ranges[rng]
	if !ok {
		return oi, rngInfo, numHits, ObjectNotFound{Bucket: bucket, Object: object}
	}
	if _, err = os.Stat(pathJoin(cacheObjPath, rngFile)); err != nil {
		return oi, rngInfo, numHits, ObjectNotFound{Bucket: bucket, Object: object}
	}
	rngInfo = RangeInfo{Range: rng, File: rngFile, Size: int64(actualRngSize)}

	err = decryptCacheObjectETag(&oi)
	return
}
// statCache is a convenience function for purge() to get ObjectInfo for cached object
func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {
	// Stat the file to get file size.
	metaPath := pathJoin(cacheObjPath, cacheMetaJSONFile)
	f, err := os.Open(metaPath)
	if err != nil {
		return meta, partial, 0, err
	}
	defer f.Close()
	meta = &cacheMeta{Version: cacheMetaVersion}
	if err := jsonLoad(f, meta); err != nil {
		return meta, partial, 0, err
	}
	// get metadata of part.1 if full file has been cached.
	partial = true
	if _, err := os.Stat(pathJoin(cacheObjPath, cacheDataFile)); err == nil {
		partial = false
	}
	if writebackInProgress(meta.Meta) {
		partial = false
	}
	return meta, partial, meta.Hits, nil
}

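// Note: statCache reports an entry as "partial" when only range files are
// cached, i.e. the full-object data file (cacheDataFile) is absent. An entry
// whose writeback commit is still in progress is reported as complete, since
// the staged data covers the whole object and is finalized by SaveMetadata.
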
// saves object metadata to disk cache
// incHitsOnly is true if metadata update is incrementing only the hit counter
// finalizeWB is true only if the metadata update is accompanied by moving the part from the temp location to the cache dir.
func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly, finalizeWB bool) error {
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cachedPath)
	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer cLock.Unlock(lkctx)
	if err = c.saveMetadata(ctx, bucket, object, meta, actualSize, rs, rsFileName, incHitsOnly); err != nil {
		return err
	}
	// move part saved in writeback directory and cache.json atomically
	if finalizeWB {
		wbdir := getCacheWriteBackSHADir(c.dir, bucket, object)
		if err = renameAll(pathJoin(wbdir, cacheDataFile), pathJoin(cachedPath, cacheDataFile), c.dir); err != nil {
			return err
		}
		removeAll(wbdir) // cleanup writeback/shadir
	}
	return nil
}

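// Note: the finalizeWB rename above happens under the same namespace lock
// that readers take in Get, so a concurrent reader either sees the old state
// or the fully renamed part plus the updated cache.json, never a
// half-committed entry.
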
// saves object metadata to disk cache
// incHitsOnly is true if metadata update is incrementing only the hit counter
func (c *diskCache) saveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
	// Create cache directory if needed
	if err := os.MkdirAll(cachedPath, 0o777); err != nil {
		return err
	}
	f, err := OpenFile(metaPath, os.O_RDWR|os.O_CREATE|writeMode, 0o666)
	if err != nil {
		return err
	}
	defer f.Close()

	m := &cacheMeta{
		Version: cacheMetaVersion,
		Bucket:  bucket,
		Object:  object,
	}
	if err := jsonLoad(f, m); err != nil && err != io.EOF {
		return err
	}
	// increment hits
	if rs != nil {
		// rsFileName gets set by putRange. Check for blank values here
		// coming from other code paths that set rs only (e.g. initial creation or hit increment).
		if rsFileName != "" {
			if m.Ranges == nil {
				m.Ranges = make(map[string]string)
			}
			m.Ranges[rs.String(actualSize)] = rsFileName
		}
	}
	if rs == nil && !incHitsOnly {
		// this is the necessary cleanup of range files when the entire object is cached.
		if _, err := os.Stat(pathJoin(cachedPath, cacheDataFile)); err == nil {
			for _, f := range m.Ranges {
				removeAll(pathJoin(cachedPath, f))
			}
			m.Ranges = nil
		}
	}
	m.Stat.Size = actualSize
	if !incHitsOnly {
		// reset meta
		m.Meta = meta
	} else {
		if m.Meta == nil {
			m.Meta = make(map[string]string)
		}
		// save etag in m.Meta if missing
		if _, ok := m.Meta["etag"]; !ok {
			if etag, ok := meta["etag"]; ok {
				m.Meta["etag"] = etag
			}
		}
	}
	m.Hits++

	m.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
	return jsonSave(f, m)
}

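// Note: saveMetadata is the single bookkeeping point for cache.json: it
// registers range files under their canonical range string, drops stale
// range files once the full object is cached, bumps the hit counter on every
// call, and records the checksum algorithm and block size that
// bitrotWriteToCache uses for the data files.
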
// updates the ETag and ModTime on cache with ETag from backend
func (c *diskCache) updateMetadata(ctx context.Context, bucket, object, etag string, modTime time.Time, size int64) error {
	cachedPath := getCacheSHADir(c.dir, bucket, object)
	metaPath := pathJoin(cachedPath, cacheMetaJSONFile)
	// Create cache directory if needed
	if err := os.MkdirAll(cachedPath, 0o777); err != nil {
		return err
	}
	f, err := OpenFile(metaPath, os.O_RDWR|writeMode, 0o666)
	if err != nil {
		return err
	}
	defer f.Close()

	m := &cacheMeta{
		Version: cacheMetaVersion,
		Bucket:  bucket,
		Object:  object,
	}
	if err := jsonLoad(f, m); err != nil && err != io.EOF {
		return err
	}
	if m.Meta == nil {
		m.Meta = make(map[string]string)
	}
	var key []byte
	var objectEncryptionKey crypto.ObjectKey

	if globalCacheKMS != nil {
		// Calculating object encryption key
		key, err = decryptObjectMeta(key, bucket, object, m.Meta)
		if err != nil {
			return err
		}
		copy(objectEncryptionKey[:], key)
		m.Meta["etag"] = hex.EncodeToString(objectEncryptionKey.SealETag([]byte(etag)))
	} else {
		m.Meta["etag"] = etag
	}
	m.Meta["last-modified"] = modTime.UTC().Format(http.TimeFormat)
	m.Meta["Content-Length"] = strconv.Itoa(int(size))
	return jsonSave(f, m)
}

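// Note: with a cache KMS configured, updateMetadata never stores the backend
// ETag in plaintext; it is sealed with the per-object encryption key
// recovered from the cached metadata, matching the unsealing done by
// decryptCacheObjectETag on the read path.
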
func getCacheSHADir(dir, bucket, object string) string {
	return pathJoin(dir, getSHA256Hash([]byte(pathJoin(bucket, object))))
}

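// For illustration, each cached object maps to one flat directory keyed by
// the SHA-256 of "bucket/object" (hash shortened; file names are the ones
// referenced elsewhere in this file, i.e. cache.json and part.1):
//
//	<cache-dir>/7f83b165.../
//		cache.json   - object metadata, hit count and the Ranges map
//		part.1       - full object data, present only once fully cached
//		<uuid>       - one file per cached range, written by putRange
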
// returns temporary writeback cache location.
func getCacheWriteBackSHADir(dir, bucket, object string) string {
	return pathJoin(dir, minioMetaBucket, "writeback", getSHA256Hash([]byte(pathJoin(bucket, object))))
}

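// Note: the writeback staging area lives under the reserved minioMetaBucket
// prefix, so staged data can never collide with a regular cached entry;
// SaveMetadata(..., finalizeWB=true) renames it from here into the directory
// returned by getCacheSHADir.
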
// Cache data to disk with bitrot checksum added for each block of 1MB
func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Reader, size uint64) (int64, string, error) {
	if err := os.MkdirAll(cachePath, 0o777); err != nil {
		return 0, "", err
	}
	filePath := pathJoin(cachePath, fileName)

	if filePath == "" || reader == nil {
		return 0, "", errInvalidArgument
	}

	if err := checkPathLength(filePath); err != nil {
		return 0, "", err
	}
	f, err := os.Create(filePath)
	if err != nil {
		return 0, "", osErrToFileErr(err)
	}
	defer f.Close()

	var bytesWritten int64

	h := HighwayHash256S.New()

	bufp := c.pool.Get().(*[]byte)
	defer c.pool.Put(bufp)
	md5Hash := md5.New()
	var n, n2 int
	for {
		n, err = io.ReadFull(reader, *bufp)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return 0, "", err
		}
		eof := err == io.EOF || err == io.ErrUnexpectedEOF
		if n == 0 && size != 0 {
			// Reached EOF, nothing more to be done.
			break
		}
		h.Reset()
		if _, err = h.Write((*bufp)[:n]); err != nil {
			return 0, "", err
		}
		hashBytes := h.Sum(nil)
		// compute md5Hash of the original data stream if this is a writeback commit to the cache
		if c.commitWriteback || c.commitWritethrough {
			if _, err = md5Hash.Write((*bufp)[:n]); err != nil {
				return 0, "", err
			}
		}
		if _, err = f.Write(hashBytes); err != nil {
			return 0, "", err
		}
		if n2, err = f.Write((*bufp)[:n]); err != nil {
			return 0, "", err
		}
		bytesWritten += int64(n2)
		if eof {
			break
		}
	}
	md5sumCurr := md5Hash.Sum(nil)

	return bytesWritten, base64.StdEncoding.EncodeToString(md5sumCurr), nil
}

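// The file written above interleaves a checksum with every block; assuming
// HighwayHash256S (32-byte sums), the stored stream looks like:
//
//	[32-byte checksum][up to cacheBlkSize bytes of data] ... repeated per block
//
// bitrotReadFromCache depends on exactly this layout when it seeks to
// (cacheBlkSize + h.Size()) * startBlock before verifying each block.
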
func newCacheEncryptReader(ctx context.Context, content io.Reader, bucket, object string, metadata map[string]string) (r io.Reader, err error) {
	objectEncryptionKey, err := newCacheEncryptMetadata(ctx, bucket, object, metadata)
	if err != nil {
		return nil, err
	}

	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey, MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
	if err != nil {
		return nil, crypto.ErrInvalidCustomerKey
	}
	return reader, nil
}

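// Note: cached content is encrypted with sio's authenticated DARE stream
// format; pinning the cipher suites to fips.DARECiphers() is intended to
// keep the cache readable under FIPS-restricted builds.
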
func newCacheEncryptMetadata(ctx context.Context, bucket, object string, metadata map[string]string) ([]byte, error) {
	var sealedKey crypto.SealedKey
	if globalCacheKMS == nil {
		return nil, errKMSNotConfigured
	}
	key, err := globalCacheKMS.GenerateKey(ctx, "", kms.Context{bucket: pathJoin(bucket, object)})
	if err != nil {
		return nil, err
	}

	objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
	sealedKey = objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, object)
	crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)

	if etag, ok := metadata["etag"]; ok {
		metadata["etag"] = hex.EncodeToString(objectKey.SealETag([]byte(etag)))
	}
	metadata[SSECacheEncrypted] = ""
	return objectKey[:], nil
}

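// Note: this is envelope encryption: the KMS supplies a data key requested
// with a context tying it to the bucket/object path, a random object key is
// sealed under it into the metadata, and the ETag is sealed with the same
// object key, so no plaintext identifiers of the content are left on the
// cache drive.
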
func (c *diskCache) GetLockContext(ctx context.Context, bucket, object string) (RWLocker, LockContext, error) {
	cachePath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cachePath)
	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
	return cLock, lkctx, err
}

// Caches the object to disk
func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly, writeback bool) (oi ObjectInfo, err error) {
	cLock, lkctx, err := c.GetLockContext(ctx, bucket, object)
	if err != nil {
		return oi, err
	}
	ctx = lkctx.Context()
	defer cLock.Unlock(lkctx)

	return c.put(ctx, bucket, object, data, size, rs, opts, incHitsOnly, writeback)
}

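// Note: the exported Put only acquires the namespace write lock and then
// delegates to the unexported put, so callers that already hold the lock
// (for example via GetLockContext) can call put directly without
// deadlocking.
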
// Caches the object to disk
func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly, writeback bool) (oi ObjectInfo, err error) {
	if !c.diskSpaceAvailable(size) {
		io.Copy(io.Discard, data)
		return oi, errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	meta, _, numHits, err := c.statCache(ctx, cachePath)
	// Case where object not yet cached
	if osIsNotExist(err) && c.after >= 1 {
		return oi, c.saveMetadata(ctx, bucket, object, opts.UserDefined, size, nil, "", false)
	}
	// Case where object already has a cache metadata entry but not yet cached
	if err == nil && numHits < c.after {
		cETag := extractETag(meta.Meta)
		bETag := extractETag(opts.UserDefined)
		if cETag == bETag {
			return oi, c.saveMetadata(ctx, bucket, object, opts.UserDefined, size, nil, "", false)
		}
		incHitsOnly = true
	}

	if rs != nil {
		return oi, c.putRange(ctx, bucket, object, data, size, rs, opts)
	}
	if !c.diskSpaceAvailable(size) {
		return oi, errDiskFull
	}

	if writeback {
		cachePath = getCacheWriteBackSHADir(c.dir, bucket, object)
	}

	if err := os.MkdirAll(cachePath, 0o777); err != nil {
		removeAll(cachePath)
		return oi, err
	}
	metadata := cloneMSS(opts.UserDefined)
	reader := data
	actualSize := uint64(size)
	if globalCacheKMS != nil {
		reader, err = newCacheEncryptReader(ctx, data, bucket, object, metadata)
		if err != nil {
			removeAll(cachePath)
			return oi, err
		}
		actualSize, _ = sio.EncryptedSize(uint64(size))
	}
	n, md5sum, err := c.bitrotWriteToCache(cachePath, cacheDataFile, reader, actualSize)
	if IsErr(err, baseErrs...) {
		// take the cache drive offline
		c.setOffline()
	}
	if err != nil {
		removeAll(cachePath)
		return oi, err
	}

	if actualSize != uint64(n) {
		removeAll(cachePath)
		return oi, IncompleteBody{Bucket: bucket, Object: object}
	}
	if writeback {
		metadata["content-md5"] = md5sum
		if md5bytes, err := base64.StdEncoding.DecodeString(md5sum); err == nil {
			metadata["etag"] = hex.EncodeToString(md5bytes)
		}
		metadata[writeBackStatusHeader] = CommitPending.String()
	}
	return ObjectInfo{
			Bucket:      bucket,
			Name:        object,
			ETag:        metadata["etag"],
			Size:        n,
			UserDefined: metadata,
		},
		c.saveMetadata(ctx, bucket, object, metadata, n, nil, "", incHitsOnly)
}

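// Note: put implements the cache admission policy: with a hit threshold
// configured (c.after >= 1), a first-seen object only gets a metadata entry;
// below the threshold, an unchanged backend ETag merely refreshes metadata
// and the hit count. Object data is written to disk (with bitrot protection)
// only once the threshold is crossed or the backend copy has changed.
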
// Caches the range to disk
func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions) error {
	rlen, err := rs.GetLength(size)
	if err != nil {
		return err
	}
	if !c.diskSpaceAvailable(rlen) {
		return errDiskFull
	}
	cachePath := getCacheSHADir(c.dir, bucket, object)
	if err := os.MkdirAll(cachePath, 0o777); err != nil {
		return err
	}
	metadata := cloneMSS(opts.UserDefined)
	reader := data
	actualSize := uint64(rlen)
	// objSize is the actual size of object (with encryption overhead if any)
	objSize := uint64(size)
	if globalCacheKMS != nil {
		reader, err = newCacheEncryptReader(ctx, data, bucket, object, metadata)
		if err != nil {
			return err
		}
		actualSize, _ = sio.EncryptedSize(uint64(rlen))
		objSize, _ = sio.EncryptedSize(uint64(size))
	}
	cacheFile := mustGetUUID()
	n, _, err := c.bitrotWriteToCache(cachePath, cacheFile, reader, actualSize)
	if IsErr(err, baseErrs...) {
		// take the cache drive offline
		c.setOffline()
	}
	if err != nil {
		removeAll(cachePath)
		return err
	}
	if actualSize != uint64(n) {
		removeAll(cachePath)
		return IncompleteBody{Bucket: bucket, Object: object}
	}
	return c.saveMetadata(ctx, bucket, object, metadata, int64(objSize), rs, cacheFile, false)
}

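// Note: each cached range lands in its own uuid-named file inside the same
// cache directory as the full object and is registered in the metadata's
// Ranges map under the canonical rs.String(objSize) key, the same key that
// statRange uses for lookups on the read path.
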
// checks streaming bitrot checksum of cached object before returning data
func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, offset, length int64, writer io.Writer) error {
	h := HighwayHash256S.New()

	checksumHash := make([]byte, h.Size())

	startBlock := offset / cacheBlkSize
	endBlock := (offset + length) / cacheBlkSize

	// get block start offset
	var blockStartOffset int64
	if startBlock > 0 {
		blockStartOffset = (cacheBlkSize + int64(h.Size())) * startBlock
	}

	tillLength := (cacheBlkSize + int64(h.Size())) * (endBlock - startBlock + 1)

	// Start offset cannot be negative.
	if offset < 0 {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}

	// Writer cannot be nil.
	if writer == nil {
		logger.LogIf(ctx, errUnexpected)
		return errUnexpected
	}
	var blockOffset, blockLength int64
	rc, err := readCacheFileStream(filePath, blockStartOffset, tillLength)
	if err != nil {
		return err
	}
	defer rc.Close()
	bufp := c.pool.Get().(*[]byte)
	defer c.pool.Put(bufp)

	for block := startBlock; block <= endBlock; block++ {
		switch {
		case startBlock == endBlock:
			blockOffset = offset % cacheBlkSize
			blockLength = length
		case block == startBlock:
			blockOffset = offset % cacheBlkSize
			blockLength = cacheBlkSize - blockOffset
		case block == endBlock:
			blockOffset = 0
			blockLength = (offset + length) % cacheBlkSize
		default:
			blockOffset = 0
			blockLength = cacheBlkSize
		}
		if blockLength == 0 {
			break
		}
		if _, err := io.ReadFull(rc, checksumHash); err != nil {
			return err
		}
		h.Reset()
		n, err := io.ReadFull(rc, *bufp)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			logger.LogIf(ctx, err)
			return err
		}
		eof := err == io.EOF || err == io.ErrUnexpectedEOF
		if n == 0 && length != 0 {
			// Reached EOF, nothing more to be done.
			break
		}

		if _, e := h.Write((*bufp)[:n]); e != nil {
			return e
		}
		hashBytes := h.Sum(nil)

		if !bytes.Equal(hashBytes, checksumHash) {
			err = fmt.Errorf("hashes do not match expected %s, got %s",
				hex.EncodeToString(checksumHash), hex.EncodeToString(hashBytes))
			logger.LogIf(GlobalContext, err)
			return err
		}

		if _, err = io.Copy(writer, bytes.NewReader((*bufp)[blockOffset:blockOffset+blockLength])); err != nil {
			if err != io.ErrClosedPipe {
				logger.LogIf(ctx, err)
				return err
			}
			eof = true
		}
		if eof {
			break
		}
	}

	return nil
}

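// Note: the loop above is the verify-before-serve half of the bitrot scheme:
// for every block it reads the stored checksum, recomputes the hash over the
// block just read, and only copies the requested window
// [blockOffset, blockOffset+blockLength) to the writer when the two match,
// so a corrupted cache block is never returned to a client.
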
 | 
					
						
							|  |  |  | // Get returns ObjectInfo and reader for object from disk cache
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) { | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	cacheObjPath := getCacheSHADir(c.dir, bucket, object) | 
					
						
							| 
									
										
										
										
											2020-11-05 00:25:42 +08:00
										 |  |  | 	cLock := c.NewNSLockFn(cacheObjPath) | 
					
						
							| 
									
										
										
										
											2021-04-30 11:55:21 +08:00
										 |  |  | 	lkctx, err := cLock.GetRLock(ctx, globalOperationTimeout) | 
					
						
							| 
									
										
										
										
											2021-03-04 10:36:43 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 		return nil, numHits, err | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-04-30 11:55:21 +08:00
										 |  |  | 	ctx = lkctx.Context() | 
					
						
							| 
									
										
										
										
											2022-12-24 11:49:07 +08:00
										 |  |  | 	defer cLock.RUnlock(lkctx) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-02-04 11:40:01 +08:00
										 |  |  | 	var objInfo ObjectInfo | 
					
						
							|  |  |  | 	var rngInfo RangeInfo | 
					
						
							|  |  |  | 	if objInfo, rngInfo, numHits, err = c.statRange(ctx, bucket, object, rs); err != nil { | 
					
						
							|  |  |  | 		return nil, numHits, toObjectErr(err, bucket, object) | 
					
						
							| 
									
										
										
										
											2019-08-10 08:09:08 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2019-12-09 05:58:04 +08:00
										 |  |  | 	cacheFile := cacheDataFile | 
					
						
							|  |  |  | 	objSize := objInfo.Size | 
					
						
							|  |  |  | 	if !rngInfo.Empty() { | 
					
						
							|  |  |  | 		// for cached ranges, need to pass actual range file size to GetObjectReader
 | 
					
						
							|  |  |  | 		// and clear out range spec
 | 
					
						
							|  |  |  | 		cacheFile = rngInfo.File | 
					
						
							|  |  |  | 		objInfo.Size = rngInfo.Size | 
					
						
		rs = nil
	}

	if objInfo.IsCompressed() {
		// Cache isn't compressed.
		delete(objInfo.UserDefined, ReservedMetadataPrefix+"compression")
	}

	// For a directory, we need to send a reader that returns no bytes.
	if HasSuffix(object, SlashSeparator) {
		// The lock taken above is released when
		// objReader.Close() is called by the caller.
		gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts)
		return gr, numHits, gerr
	}

	fn, startOffset, length, nErr := NewGetObjectReader(rs, objInfo, opts)
	if nErr != nil {
		return nil, numHits, nErr
	}
	var totalBytesRead int64

	pr, pw := xioutil.WaitPipe()
	if len(objInfo.Parts) > 0 {
		// For negative length read everything.
		if length < 0 {
			length = objInfo.Size - startOffset
		}

		// Reply back invalid range if the input offset and length fall out of range.
		if startOffset > objInfo.Size || startOffset+length > objInfo.Size {
			logger.LogIf(ctx, InvalidRange{startOffset, length, objInfo.Size}, logger.Application)
			return nil, numHits, InvalidRange{startOffset, length, objInfo.Size}
		}
		// Get the start part index and offset.
		partIndex, partOffset, err := cacheObjectToPartOffset(objInfo, startOffset)
		if err != nil {
			return nil, numHits, InvalidRange{startOffset, length, objInfo.Size}
		}
		// Calculate endOffset according to length.
		endOffset := startOffset
		if length > 0 {
			endOffset += length - 1
		}

		// Get the last part index to read the given length.
		lastPartIndex, _, err := cacheObjectToPartOffset(objInfo, endOffset)
		if err != nil {
			return nil, numHits, InvalidRange{startOffset, length, objInfo.Size}
		}
		go func() {
			for ; partIndex <= lastPartIndex; partIndex++ {
				if length == totalBytesRead {
					break
				}
				partNumber := objInfo.Parts[partIndex].Number
				// Save the current part name and size.
				partSize := objInfo.Parts[partIndex].Size
				partLength := partSize - partOffset
				// partLength should be adjusted so that we don't write more data than what was requested.
				if partLength > (length - totalBytesRead) {
					partLength = length - totalBytesRead
				}
				filePath := pathJoin(cacheObjPath, fmt.Sprintf("part.%d", partNumber))
				err := c.bitrotReadFromCache(ctx, filePath, partOffset, partLength, pw)
				if err != nil {
					removeAll(cacheObjPath)
					pw.CloseWithError(err)
					break
				}
				totalBytesRead += partLength
				// partOffset is valid only for the first part, hence reset it to 0 for
				// the remaining parts.
				partOffset = 0
			} // End of read all parts loop.
			// The outer err is nil at this point, so a successful walk closes
			// the pipe cleanly and the reader sees io.EOF.
			pw.CloseWithError(err)
		}()
	} else {
		go func() {
			if writebackInProgress(objInfo.UserDefined) {
				cacheObjPath = getCacheWriteBackSHADir(c.dir, bucket, object)
			}
			filePath := pathJoin(cacheObjPath, cacheFile)
			err := c.bitrotReadFromCache(ctx, filePath, startOffset, length, pw)
			if err != nil {
				removeAll(cacheObjPath)
			}
			pw.CloseWithError(err)
		}()
	}

	// Cleanup function to cause the goroutine above to exit, in
	// case of an incomplete read.
	pipeCloser := func() { pr.CloseWithError(nil) }

	gr, gerr := fn(pr, h, pipeCloser)
	if gerr != nil {
		return gr, numHits, gerr
	}
	if globalCacheKMS != nil {
		// Clean up internal SSE cache metadata.
		delete(gr.ObjInfo.UserDefined, xhttp.AmzServerSideEncryption)
	}
	if !rngInfo.Empty() {
		// Overlay Size with the actual object size, not the range size.
		gr.ObjInfo.Size = objSize
	}
	return gr, numHits, nil
}

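// Illustrative sketch (not part of the original code): the ranged read above
// trims the first part by partOffset and the last part by the remaining
// length. With hypothetical part sizes, the same walk extracted into plain Go:
//
//	partSizes := []int64{5 << 20, 5 << 20, 3 << 20} // stand-in for objInfo.Parts[i].Size
//	start, length := int64(6<<20), int64(2<<20)     // read 2 MiB at offset 6 MiB
//	var read int64
//	for i, off := 0, start; i < len(partSizes) && read < length; i++ {
//		if off >= partSizes[i] {
//			off -= partSizes[i] // skip whole parts before the start offset
//			continue
//		}
//		n := partSizes[i] - off // bytes available in this part
//		if n > length-read {
//			n = length - read // don't overshoot the requested length
//		}
//		// The real code streams these n bytes via c.bitrotReadFromCache on "part.<N>".
//		read += n
//		off = 0 // the offset applies to the first part only
//	}
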
// deletes the cached object - caller should have taken a write lock
func (c *diskCache) delete(bucket, object string) (err error) {
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	return removeAll(cacheObjPath)
}

// Delete deletes the cached object after taking a write lock on it.
func (c *diskCache) Delete(ctx context.Context, bucket, object string) (err error) {
	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cacheObjPath)
	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	defer cLock.Unlock(lkctx)
	return removeAll(cacheObjPath)
}

// Exists is a convenience function to check if the object is cached on this diskCache.
func (c *diskCache) Exists(ctx context.Context, bucket, object string) bool {
	if _, err := os.Stat(getCacheSHADir(c.dir, bucket, object)); err != nil {
		return false
	}
	return true
}

// queues writeback upload failures on server startup
func (c *diskCache) scanCacheWritebackFailures(ctx context.Context) {
	defer close(c.retryWritebackCh)
	filterFn := func(name string, typ os.FileMode) error {
		if name == minioMetaBucket {
			// Proceed to next file.
			return nil
		}
		cacheDir := pathJoin(c.dir, name)
		meta, _, _, err := c.statCachedMeta(ctx, cacheDir)
		if err != nil {
			return nil
		}

		objInfo := meta.ToObjectInfo()
		status, ok := objInfo.UserDefined[writeBackStatusHeader]
		if !ok || status == CommitComplete.String() {
			return nil
		}
		select {
		case c.retryWritebackCh <- objInfo:
		default:
		}

		return nil
	}

	if err := readDirFn(c.dir, filterFn); err != nil {
		logger.LogIf(ctx, err)
		return
	}
}

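// Illustrative sketch (not part of the original code): the select with an
// empty default above is Go's idiom for a non-blocking channel send - when
// the retry channel's buffer is full, the entry is dropped instead of
// stalling the startup scan. The pattern in isolation:
//
//	ch := make(chan int, 2)
//	for i := 0; i < 5; i++ {
//		select {
//		case ch <- i: // buffered send succeeded
//		default: // buffer full: drop i rather than block
//		}
//	}
//	// ch now holds 0 and 1; 2, 3 and 4 were dropped.
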
// NewMultipartUpload caches multipart uploads when MINIO_CACHE_COMMIT is set to writethrough mode.
// Parts are saved in the .minio.sys/multipart/cachePath/uploadID dir until finalized; then the
// individual parts are moved from the upload dir to the cachePath/ directory.
func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID string, opts ObjectOptions) (uploadID string, err error) {
	uploadID = uID
	if uploadID == "" {
		return "", InvalidUploadID{
			Bucket:   bucket,
			Object:   object,
			UploadID: uploadID,
		}
	}

	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadIDDir := path.Join(cachePath, uploadID)
	if err := mkdirAll(uploadIDDir, 0o777, c.dir); err != nil {
		return uploadID, err
	}
	metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)

	f, err := OpenFile(metaPath, os.O_RDWR|os.O_CREATE|writeMode, 0o666)
	if err != nil {
		return uploadID, err
	}
	defer f.Close()

	m := &cacheMeta{
		Version: cacheMetaVersion,
		Bucket:  bucket,
		Object:  object,
	}
	if err := jsonLoad(f, m); err != nil && err != io.EOF {
		return uploadID, err
	}

	m.Meta = opts.UserDefined

	m.Checksum = CacheChecksumInfoV1{Algorithm: HighwayHash256S.String(), Blocksize: cacheBlkSize}
	m.Stat.ModTime = UTCNow()
	if globalCacheKMS != nil {
		m.Meta[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
		if _, err := newCacheEncryptMetadata(ctx, bucket, object, m.Meta); err != nil {
			return uploadID, err
		}
	}
	err = jsonSave(f, m)
	return uploadID, err
}

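// Illustrative sketch (not part of the original code): jsonLoad and jsonSave
// are internal helpers; the load-modify-save cycle on cache.json amounts to a
// seek/decode, then truncate/encode round trip on the open file. A minimal
// stand-alone analogue using encoding/json (types and field names
// hypothetical):
//
//	type meta struct {
//		Version string            `json:"version"`
//		Meta    map[string]string `json:"meta"`
//	}
//
//	func loadModifySave(f *os.File, update func(*meta)) error {
//		var m meta
//		if _, err := f.Seek(0, io.SeekStart); err != nil {
//			return err
//		}
//		// An empty (freshly created) file decodes to io.EOF, which is fine.
//		if err := json.NewDecoder(f).Decode(&m); err != nil && err != io.EOF {
//			return err
//		}
//		update(&m)
//		if err := f.Truncate(0); err != nil {
//			return err
//		}
//		if _, err := f.Seek(0, io.SeekStart); err != nil {
//			return err
//		}
//		return json.NewEncoder(f).Encode(&m)
//	}
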
// PutObjectPart caches a part to the cache multipart path.
func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, opts ObjectOptions) (partInfo PartInfo, err error) {
	oi := PartInfo{}
	if !c.diskSpaceAvailable(size) {
		io.Copy(io.Discard, data)
		return oi, errDiskFull
	}
	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadIDDir := path.Join(cachePath, uploadID)

	partIDLock := c.NewNSLockFn(pathJoin(uploadIDDir, strconv.Itoa(partID)))
	lkctx, err := partIDLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}

	ctx = lkctx.Context()
	defer partIDLock.Unlock(lkctx)
	meta, _, _, err := c.statCache(ctx, uploadIDDir)
	// Case where the upload is not yet cached.
	if err != nil {
		return oi, err
	}

	if !c.diskSpaceAvailable(size) {
		return oi, errDiskFull
	}
	reader := data
	actualSize := uint64(size)
	if globalCacheKMS != nil {
		reader, err = newCachePartEncryptReader(ctx, bucket, object, partID, data, size, meta.Meta)
		if err != nil {
			return oi, err
		}
		actualSize, _ = sio.EncryptedSize(uint64(size))
	}
	n, md5sum, err := c.bitrotWriteToCache(uploadIDDir, fmt.Sprintf("part.%d", partID), reader, actualSize)
	if IsErr(err, baseErrs...) {
		// Take the cache drive offline.
		c.setOffline()
	}
	if err != nil {
		return oi, err
	}

	if actualSize != uint64(n) {
		return oi, IncompleteBody{Bucket: bucket, Object: object}
	}
	var md5hex string
	if md5bytes, err := base64.StdEncoding.DecodeString(md5sum); err == nil {
		md5hex = hex.EncodeToString(md5bytes)
	}

	pInfo := PartInfo{
		PartNumber:   partID,
		ETag:         md5hex,
		Size:         n,
		ActualSize:   int64(actualSize),
		LastModified: UTCNow(),
	}
	return pInfo, nil
}

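// Illustrative sketch (not part of the original code): bitrotWriteToCache
// reports the part's MD5 sum base64-encoded, while S3-style ETags are hex
// strings, hence the decode/re-encode above. The conversion in isolation:
//
//	sum := md5.Sum([]byte("hello"))                  // raw 16 bytes
//	b64 := base64.StdEncoding.EncodeToString(sum[:]) // "XUFAKrxLKna5cZ2REBfFkg=="
//	raw, err := base64.StdEncoding.DecodeString(b64)
//	if err != nil {
//		panic(err)
//	}
//	etag := hex.EncodeToString(raw) // "5d41402abc4b2a76b9719d911017c592"
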
// SavePartMetadata saves part upload metadata to the uploadID directory on the disk cache.
func (c *diskCache) SavePartMetadata(ctx context.Context, bucket, object, uploadID string, partID int, pinfo PartInfo) error {
	cachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadDir := path.Join(cachePath, uploadID)

	// Acquire a write lock at the upload path to update cache.json.
	uploadLock := c.NewNSLockFn(uploadDir)
	ulkctx, err := uploadLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	defer uploadLock.Unlock(ulkctx)

	metaPath := pathJoin(uploadDir, cacheMetaJSONFile)
	f, err := OpenFile(metaPath, os.O_RDWR|writeMode, 0o666)
	if err != nil {
		return err
	}
	defer f.Close()

	m := &cacheMeta{}
	if err := jsonLoad(f, m); err != nil && err != io.EOF {
		return err
	}
	var key []byte
	var objectEncryptionKey crypto.ObjectKey
	if globalCacheKMS != nil {
		// Calculate the object encryption key.
		key, err = decryptObjectMeta(key, bucket, object, m.Meta)
		if err != nil {
			return err
		}
		copy(objectEncryptionKey[:], key)
		pinfo.ETag = hex.EncodeToString(objectEncryptionKey.SealETag([]byte(pinfo.ETag)))
	}

	pIdx := cacheObjPartIndex(m, partID)
	if pIdx == -1 {
		m.PartActualSizes = append(m.PartActualSizes, pinfo.ActualSize)
		m.PartNumbers = append(m.PartNumbers, pinfo.PartNumber)
		m.PartETags = append(m.PartETags, pinfo.ETag)
		m.PartSizes = append(m.PartSizes, pinfo.Size)
	} else {
		m.PartActualSizes[pIdx] = pinfo.ActualSize
		m.PartNumbers[pIdx] = pinfo.PartNumber
		m.PartETags[pIdx] = pinfo.ETag
		m.PartSizes[pIdx] = pinfo.Size
	}
	return jsonSave(f, m)
}

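// Illustrative sketch (not part of the original code): cache.json tracks part
// metadata in parallel slices keyed by part number, so re-uploading a part
// must overwrite in place rather than append a duplicate entry. The
// index-or-append pattern in isolation (names hypothetical):
//
//	nums, sizes := []int{1, 3}, []int64{100, 300}
//	upsert := func(num int, size int64) {
//		for i, n := range nums {
//			if n == num {
//				sizes[i] = size // existing part: overwrite in place
//				return
//			}
//		}
//		nums, sizes = append(nums, num), append(sizes, size) // new part: append
//	}
//	upsert(3, 333) // overwrites: nums=[1 3], sizes=[100 333]
//	upsert(2, 222) // appends:    nums=[1 3 2], sizes=[100 333 222]
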
// newCachePartEncryptReader returns an encrypted cache part reader, with the part data encrypted using the part encryption key.
func newCachePartEncryptReader(ctx context.Context, bucket, object string, partID int, content io.Reader, size int64, metadata map[string]string) (r io.Reader, err error) {
	var key []byte
	var objectEncryptionKey, partEncryptionKey crypto.ObjectKey

	// Calculate the object encryption key.
	key, err = decryptObjectMeta(key, bucket, object, metadata)
	if err != nil {
		return nil, err
	}
	copy(objectEncryptionKey[:], key)

	// Derive the per-part key from the object encryption key.
	partEnckey := objectEncryptionKey.DerivePartKey(uint32(partID))
	copy(partEncryptionKey[:], partEnckey[:])
	wantSize := int64(-1)
	if size >= 0 {
		info := ObjectInfo{Size: size}
		wantSize = info.EncryptedSize()
	}
	hReader, err := hash.NewReader(content, wantSize, "", "", size)
	if err != nil {
		return nil, err
	}

	pReader := NewPutObjReader(hReader)
	content, err = pReader.WithEncryption(hReader, &partEncryptionKey)
	if err != nil {
		return nil, err
	}

	reader, err := sio.EncryptReader(content, sio.Config{Key: partEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
	if err != nil {
		return nil, crypto.ErrInvalidCustomerKey
	}
	return reader, nil
}

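// Illustrative note (not part of the original code): sio's DARE format splits
// the plaintext into packages of at most 64 KiB, each carrying a fixed
// header/authentication-tag overhead, which is why callers compare written
// bytes against sio.EncryptedSize(size) rather than size itself. A minimal
// usage sketch:
//
//	enc, err := sio.EncryptedSize(1 << 20) // ciphertext size for 1 MiB of plaintext
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(enc > 1<<20) // true: ciphertext is slightly larger than plaintext
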
// uploadIDExists returns an error if the uploadID is not being cached.
func (c *diskCache) uploadIDExists(bucket, object, uploadID string) (err error) {
	mpartCachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadIDDir := path.Join(mpartCachePath, uploadID)
	if _, err := Stat(uploadIDDir); err != nil {
		return err
	}
	return nil
}

// CompleteMultipartUpload completes a multipart upload on the cache. The parts and cache.json are moved from the
// temporary location in .minio.sys/multipart/cacheSHA/.. to the cacheSHA path after part verification succeeds.
func (c *diskCache) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, roi ObjectInfo, opts ObjectOptions) (oi ObjectInfo, err error) {
	cachePath := getCacheSHADir(c.dir, bucket, object)
	cLock := c.NewNSLockFn(cachePath)
	lkctx, err := cLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}

	ctx = lkctx.Context()
	defer cLock.Unlock(lkctx)
	mpartCachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadIDDir := path.Join(mpartCachePath, uploadID)

	// Case where the upload is not yet cached.
	uploadMeta, _, _, uerr := c.statCache(ctx, uploadIDDir)
	if uerr != nil {
		return oi, errUploadIDNotFound
	}

	// Calculate the full object size.
	var objectSize int64

	// Calculate the consolidated actual size.
	var objectActualSize int64

	var partETags []string
	partETags, err = decryptCachePartETags(uploadMeta)
	if err != nil {
		return oi, err
	}
	for i, pi := range uploadedParts {
		pIdx := cacheObjPartIndex(uploadMeta, pi.PartNumber)
		if pIdx == -1 {
			invp := InvalidPart{
				PartNumber: pi.PartNumber,
				GotETag:    pi.ETag,
			}
			return oi, invp
		}
		pi.ETag = canonicalizeETag(pi.ETag)
		if partETags[pIdx] != pi.ETag {
			invp := InvalidPart{
				PartNumber: pi.PartNumber,
				ExpETag:    partETags[pIdx],
				GotETag:    pi.ETag,
			}
			return oi, invp
		}
		// All parts except the last one have to be at least 5MB.
		if (i < len(uploadedParts)-1) && !isMinAllowedPartSize(uploadMeta.PartActualSizes[pIdx]) {
			return oi, PartTooSmall{
				PartNumber: pi.PartNumber,
				PartSize:   uploadMeta.PartActualSizes[pIdx],
				PartETag:   pi.ETag,
			}
		}

		// Save for the total object size.
		objectSize += uploadMeta.PartSizes[pIdx]

		// Save the consolidated actual size.
		objectActualSize += uploadMeta.PartActualSizes[pIdx]
	}
	uploadMeta.Stat.Size = objectSize
	uploadMeta.Stat.ModTime = roi.ModTime
	uploadMeta.Bucket = bucket
	uploadMeta.Object = object
	// If encrypted, make sure the ETag is updated.
	uploadMeta.Meta["etag"] = roi.ETag
	uploadMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
	var cpartETags []string
	var cpartNums []int
	var cpartSizes, cpartActualSizes []int64
	for _, pi := range uploadedParts {
		pIdx := cacheObjPartIndex(uploadMeta, pi.PartNumber)
		if pIdx != -1 {
			cpartETags = append(cpartETags, uploadMeta.PartETags[pIdx])
			cpartNums = append(cpartNums, uploadMeta.PartNumbers[pIdx])
			cpartSizes = append(cpartSizes, uploadMeta.PartSizes[pIdx])
			cpartActualSizes = append(cpartActualSizes, uploadMeta.PartActualSizes[pIdx])
		}
	}
	uploadMeta.PartETags = cpartETags
	uploadMeta.PartSizes = cpartSizes
	uploadMeta.PartActualSizes = cpartActualSizes
	uploadMeta.PartNumbers = cpartNums
	uploadMeta.Hits++
	metaPath := pathJoin(uploadIDDir, cacheMetaJSONFile)

	f, err := OpenFile(metaPath, os.O_RDWR|os.O_CREATE|writeMode, 0o666)
	if err != nil {
		return oi, err
	}
	defer f.Close()
	jsonSave(f, uploadMeta)
	for _, pi := range uploadedParts {
		part := fmt.Sprintf("part.%d", pi.PartNumber)
		renameAll(pathJoin(uploadIDDir, part), pathJoin(cachePath, part), c.dir)
	}
	renameAll(pathJoin(uploadIDDir, cacheMetaJSONFile), pathJoin(cachePath, cacheMetaJSONFile), c.dir)
	removeAll(uploadIDDir) // clean up any unused parts in the uploadIDDir
	return uploadMeta.ToObjectInfo(), nil
}

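// Illustrative sketch (not part of the original code): completion validates
// the client's part list against what was cached - every part number must be
// known, ETags must match after canonicalization, and every part except the
// last must meet the S3 minimum part size. The same checks on plain data
// (names and the 5 MiB constant are hypothetical stand-ins):
//
//	cachedETags := map[int]string{1: "etag1", 2: "etag2"} // partNumber -> ETag
//	sizes := map[int]int64{1: 5 << 20, 2: 1 << 20}        // partNumber -> actual size
//	parts := []struct {
//		Num  int
//		ETag string
//	}{{1, "etag1"}, {2, "etag2"}}
//	for i, p := range parts {
//		etag, ok := cachedETags[p.Num]
//		if !ok || etag != p.ETag {
//			panic("invalid part") // InvalidPart in the real code
//		}
//		if i < len(parts)-1 && sizes[p.Num] < 5<<20 {
//			panic("part too small") // PartTooSmall in the real code
//		}
//	}
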
// AbortUpload aborts an in-progress multipart upload on the cache by removing its staging directory.
func (c *diskCache) AbortUpload(bucket, object, uploadID string) (err error) {
	mpartCachePath := getMultipartCacheSHADir(c.dir, bucket, object)
	uploadDir := path.Join(mpartCachePath, uploadID)
	return removeAll(uploadDir)
}

// cacheObjPartIndex - returns the index of the matching object part number.
func cacheObjPartIndex(m *cacheMeta, partNumber int) int {
	for i, part := range m.PartNumbers {
		if partNumber == part {
			return i
		}
	}
	return -1
}

// cacheObjectToPartOffset calculates the part index and part offset for the requested offset of content on the cache.
func cacheObjectToPartOffset(objInfo ObjectInfo, offset int64) (partIndex int, partOffset int64, err error) {
	if offset == 0 {
		// Special case - if offset is 0, then partIndex and partOffset are always 0.
		return 0, 0, nil
	}
	partOffset = offset
	// Seek until the object offset maps to a particular part offset.
	for i, part := range objInfo.Parts {
		partIndex = i
		// If the offset is smaller than the part size, we have reached the proper part offset.
		if partOffset < part.Size {
			return partIndex, partOffset, nil
		}
		// Continue towards the next part.
		partOffset -= part.Size
	}
	// Offset is beyond the size of the object; return InvalidRange.
	return 0, 0, InvalidRange{}
}

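// Illustrative example (not part of the original code): with hypothetical
// part sizes of 5 MiB, 5 MiB and 3 MiB, an absolute offset of 11 MiB lands
// 1 MiB into the third part:
//
//	11 MiB >= 5 MiB -> subtract (6 MiB left), 6 MiB >= 5 MiB -> subtract (1 MiB left),
//	1 MiB < 3 MiB   -> partIndex=2, partOffset=1 MiB
//
// An offset of 13 MiB or more walks past every part and yields InvalidRange.
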
// getMultipartCacheSHADir returns the path used for an on-going multipart cache upload.
func getMultipartCacheSHADir(dir, bucket, object string) string {
	return pathJoin(dir, minioMetaBucket, cacheMultipartDir, getSHA256Hash([]byte(pathJoin(bucket, object))))
}

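// Illustrative sketch (not part of the original code): getSHA256Hash is an
// internal helper; the directory name is, under this sketch's assumption,
// the hex-encoded SHA-256 of "bucket/object", which keeps cache paths
// fixed-length and filesystem-safe. A stand-alone equivalent with the
// standard library:
//
//	sum := sha256.Sum256([]byte("mybucket/path/to/object")) // crypto/sha256
//	dirName := hex.EncodeToString(sum[:])                   // 64 hex characters
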
// cleanupStaleUploads cleans up stale cache multipart uploads once per cleanup interval.
func (c *diskCache) cleanupStaleUploads(ctx context.Context) {
	timer := time.NewTimer(cacheStaleUploadCleanupInterval)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			now := time.Now()
			readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir), func(shaDir string, typ os.FileMode) error {
				return readDirFn(pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir), func(uploadIDDir string, typ os.FileMode) error {
					uploadIDPath := pathJoin(c.dir, minioMetaBucket, cacheMultipartDir, shaDir, uploadIDDir)
					fi, err := Stat(uploadIDPath)
					if err != nil {
						return nil
					}
					if now.Sub(fi.ModTime()) > cacheStaleUploadExpiry {
						removeAll(uploadIDPath)
					}
					return nil
				})
			})
			// Clean up the writeback folder when cache.json no longer exists in the main
			// c.dir/<sha256(bucket,object)> path and the entry is past the upload expiry window.
			readDirFn(pathJoin(c.dir, minioMetaBucket, cacheWritebackDir), func(shaDir string, typ os.FileMode) error {
				wbdir := pathJoin(c.dir, minioMetaBucket, cacheWritebackDir, shaDir)
				cachedir := pathJoin(c.dir, shaDir)
				if _, err := Stat(cachedir); os.IsNotExist(err) {
					fi, err := Stat(wbdir)
					if err != nil {
						return nil
					}
					if now.Sub(fi.ModTime()) > cacheWBStaleUploadExpiry {
						return removeAll(wbdir)
					}
				}
				return nil
			})

			// Reset for the next interval.
			timer.Reset(cacheStaleUploadCleanupInterval)
		}
	}
}

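// Illustrative sketch (not part of the original code): the loop above uses
// time.NewTimer plus an explicit Reset after each pass, rather than
// time.NewTicker, so a slow cleanup pass cannot pile up overlapping runs -
// the next countdown starts only after the current pass finishes. The
// skeleton of the pattern (doWork and interval are stand-ins):
//
//	timer := time.NewTimer(interval)
//	defer timer.Stop()
//	for {
//		select {
//		case <-ctx.Done():
//			return
//		case <-timer.C:
//			doWork()              // may take arbitrarily long
//			timer.Reset(interval) // next run counts from the end of this one
//		}
//	}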