| 
									
										
										
										
											2021-04-19 03:41:13 +08:00
										 |  |  | // Copyright (c) 2015-2021 MinIO, Inc.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This file is part of MinIO Object Storage stack
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is free software: you can redistribute it and/or modify
 | 
					
						
							|  |  |  | // it under the terms of the GNU Affero General Public License as published by
 | 
					
						
							|  |  |  | // the Free Software Foundation, either version 3 of the License, or
 | 
					
						
							|  |  |  | // (at your option) any later version.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is distributed in the hope that it will be useful
 | 
					
						
							|  |  |  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
					
						
							|  |  |  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
					
						
							|  |  |  | // GNU Affero General Public License for more details.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // You should have received a copy of the GNU Affero General Public License
 | 
					
						
							|  |  |  | // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | package cmd | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							|  |  |  | 	"bytes" | 
					
						
							|  |  |  | 	"context" | 
					
						
							|  |  |  | 	"encoding/binary" | 
					
						
							|  |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2020-12-05 01:32:35 +08:00
										 |  |  | 	"math" | 
					
						
							| 
									
										
										
										
											2020-09-26 10:21:52 +08:00
										 |  |  | 	"math/rand" | 
					
						
							| 
									
										
										
										
											2021-02-06 01:57:30 +08:00
										 |  |  | 	"net/http" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"os" | 
					
						
							|  |  |  | 	"path" | 
					
						
							|  |  |  | 	"strings" | 
					
						
							| 
									
										
										
										
											2020-12-05 01:32:35 +08:00
										 |  |  | 	"sync" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"time" | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-06 23:52:02 +08:00
										 |  |  | 	"github.com/minio/madmin-go" | 
					
						
							| 
									
										
										
										
											2020-10-15 04:51:51 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/config/heal" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/logger" | 
					
						
							| 
									
										
										
										
											2021-04-24 00:51:12 +08:00
										 |  |  | 	"github.com/minio/minio/cmd/logger/message/audit" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/bucket/lifecycle" | 
					
						
							| 
									
										
										
										
											2020-07-22 08:49:56 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/bucket/replication" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/color" | 
					
						
							| 
									
										
										
										
											2020-12-18 08:52:47 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/console" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"github.com/minio/minio/pkg/event" | 
					
						
							|  |  |  | 	"github.com/minio/minio/pkg/hash" | 
					
						
							|  |  |  | 	"github.com/willf/bloom" | 
					
						
							|  |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
// Tuning knobs for the data scanner and the opportunistic healing it drives.
const (
	dataScannerSleepPerFolder = 20 * time.Millisecond // Time to wait between folders.
	dataScannerStartDelay     = 1 * time.Minute       // Time to wait on startup and between cycles.
	dataUsageUpdateDirCycles  = 16                    // Visit all folders every n cycles.

	// healDeleteDangling enables removal of dangling entries during heal
	// (presumably; the flag is consumed outside this view — TODO confirm at call site).
	healDeleteDangling    = true
	healFolderIncludeProb = 32  // Include a clean folder one in n cycles.
	healObjectSelectProb  = 512 // Overall probability of a file being scanned; one in n.
)
					
						
							|  |  |  | 
 | 
					
						
var (
	// globalHealConfig holds the cluster heal configuration; guarded by
	// globalHealConfigMu for concurrent readers/writers.
	globalHealConfig   heal.Config
	globalHealConfigMu sync.Mutex

	// dataScannerLeaderLockTimeout bounds acquisition of the cluster-wide
	// scanner leader lock (base 30s, retry unit 10s per newDynamicTimeout's
	// signature — semantics of the two values defined elsewhere).
	dataScannerLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
	// Sleeper values are updated when config is loaded.
	scannerSleeper = newDynamicSleeper(10, 10*time.Second)
	// scannerCycle is the mutex-protected interval between scanner cycles;
	// starts at dataScannerStartDelay and may be updated from config.
	scannerCycle = &safeDuration{
		t: dataScannerStartDelay,
	}
)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
// initDataScanner will start the scanner in the background.
// It returns immediately; runDataScanner blocks until ctx is canceled.
func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
	go runDataScanner(ctx, objAPI)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-03-31 04:59:02 +08:00
										 |  |  | type safeDuration struct { | 
					
						
							|  |  |  | 	sync.Mutex | 
					
						
							|  |  |  | 	t time.Duration | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (s *safeDuration) Update(t time.Duration) { | 
					
						
							|  |  |  | 	s.Lock() | 
					
						
							|  |  |  | 	defer s.Unlock() | 
					
						
							|  |  |  | 	s.t = t | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (s *safeDuration) Get() time.Duration { | 
					
						
							|  |  |  | 	s.Lock() | 
					
						
							|  |  |  | 	defer s.Unlock() | 
					
						
							|  |  |  | 	return s.t | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
// runDataScanner will start a data scanner.
// The function will block until the context is canceled.
// There should only ever be one scanner running per cluster.
func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
	// Make sure only 1 scanner is running on the cluster.
	locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
	var ctx context.Context
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		// Retry with a randomized sleep until this node wins the leader lock.
		lkctx, err := locker.GetLock(pctx, dataScannerLeaderLockTimeout)
		if err != nil {
			time.Sleep(time.Duration(r.Float64() * float64(scannerCycle.Get())))
			continue
		}
		// NOTE: this defer is intentionally inside the loop; it only runs when
		// runDataScanner itself returns, so the leader lock is held for the
		// lifetime of the scanner.
		ctx = lkctx.Context()
		defer lkctx.Cancel()
		break
		// No unlock for "leader" lock.
	}

	// Load current bloom cycle
	nextBloomCycle := intDataUpdateTracker.current() + 1

	// Try to restore the persisted cycle counter; on a missing object/bucket
	// the in-memory value above is kept.
	br, err := objAPI.GetObjectNInfo(ctx, dataUsageBucket, dataUsageBloomName, nil, http.Header{}, readLock, ObjectOptions{})
	if err != nil {
		if !isErrObjectNotFound(err) && !isErrBucketNotFound(err) {
			logger.LogIf(ctx, err)
		}
	} else {
		// The persisted counter is exactly 8 bytes: one little-endian uint64.
		if br.ObjInfo.Size == 8 {
			if err = binary.Read(br, binary.LittleEndian, &nextBloomCycle); err != nil {
				logger.LogIf(ctx, err)
			}
		}
		br.Close()
	}

	scannerTimer := time.NewTimer(scannerCycle.Get())
	defer scannerTimer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-scannerTimer.C:
			// Reset the timer for next cycle.
			scannerTimer.Reset(scannerCycle.Get())

			if intDataUpdateTracker.debug {
				console.Debugln("starting scanner cycle")
			}

			// Wait before starting next cycle and wait on startup.
			// Buffered by 1 so the producer is not blocked on a slow consumer start.
			results := make(chan madmin.DataUsageInfo, 1)
			go storeDataUsageInBackend(ctx, objAPI, results)
			bf, err := globalNotificationSys.updateBloomFilter(ctx, nextBloomCycle)
			logger.LogIf(ctx, err)
			// NSScanner feeds usage results into the channel consumed above;
			// the channel is closed here (sender side) once the scan finishes.
			err = objAPI.NSScanner(ctx, bf, results)
			close(results)
			logger.LogIf(ctx, err)
			if err == nil {
				// Store new cycle...
				nextBloomCycle++
				var tmp [8]byte
				binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
				r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)))
				if err != nil {
					logger.LogIf(ctx, err)
					continue
				}

				// Persist the counter; a missing bucket is tolerated silently
				// (expected on fresh setups before the meta bucket exists).
				_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageBloomName, NewPutObjReader(r), ObjectOptions{})
				if !isErrBucketNotFound(err) {
					logger.LogIf(ctx, err)
				}
			}
		}
	}
}
					
						
							|  |  |  | 
 | 
					
						
// cachedFolder is a folder queued for scanning, together with the heal
// probability divisor inherited from its parent.
type cachedFolder struct {
	name              string         // folder path relative to the scan root
	parent            *dataUsageHash // hash of the parent folder; nil for top level
	objectHealProbDiv uint32         // divisor applied to object heal probability for this subtree
}
					
						
							|  |  |  | 
 | 
					
						
// folderScanner holds the state for a single scanDataFolder run:
// the previous cache, the cache being built, and the folders discovered
// while walking the tree.
type folderScanner struct {
	root       string         // base path being scanned
	getSize    getSizeFn      // callback that sizes (and may heal) a single item
	oldCache   dataUsageCache // cache from the previous cycle, used for selective rescans
	newCache   dataUsageCache // cache being assembled this cycle
	withFilter *bloomFilter   // optional bloom filter limiting which folders are rescanned

	dataUsageScannerDebug bool
	healFolderInclude     uint32 // Include a clean folder one in n cycles.
	healObjectSelect      uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude

	newFolders      []cachedFolder // folders not present in oldCache; deep-scanned
	existingFolders []cachedFolder // folders already known; selectively rescanned
	disks           []StorageAPI   // disks of the erasure set, used for heal checks
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | // scanDataFolder will scanner the basepath+cache.Info.Name and return an updated cache.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | // The returned cache will always be valid, but may not be updated from the existing.
 | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
										 |  |  | // Before each operation sleepDuration is called which can be used to temporarily halt the scanner.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | // If the supplied context is canceled the function will return at the first chance.
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | func scanDataFolder(ctx context.Context, basePath string, cache dataUsageCache, getSize getSizeFn) (dataUsageCache, error) { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	t := UTCNow() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	logPrefix := color.Green("data-usage: ") | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 	logSuffix := color.Blue("- %v + %v", basePath, cache.Info.Name) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	if intDataUpdateTracker.debug { | 
					
						
							|  |  |  | 		defer func() { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 			console.Debugf(logPrefix+" Scanner time: %v %s\n", time.Since(t), logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		}() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	switch cache.Info.Name { | 
					
						
							|  |  |  | 	case "", dataUsageRoot: | 
					
						
							|  |  |  | 		return cache, errors.New("internal error: root scan attempted") | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-01-19 18:40:52 +08:00
										 |  |  | 	skipHeal := cache.Info.SkipHealing | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	s := folderScanner{ | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 		root:                  basePath, | 
					
						
							|  |  |  | 		getSize:               getSize, | 
					
						
							|  |  |  | 		oldCache:              cache, | 
					
						
							|  |  |  | 		newCache:              dataUsageCache{Info: cache.Info}, | 
					
						
							|  |  |  | 		newFolders:            nil, | 
					
						
							|  |  |  | 		existingFolders:       nil, | 
					
						
							|  |  |  | 		dataUsageScannerDebug: intDataUpdateTracker.debug, | 
					
						
							|  |  |  | 		healFolderInclude:     0, | 
					
						
							|  |  |  | 		healObjectSelect:      0, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 	// Add disks for set healing.
 | 
					
						
							|  |  |  | 	if len(cache.Disks) > 0 { | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 		objAPI, ok := newObjectLayerFn().(*erasureServerPools) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 		if ok { | 
					
						
							|  |  |  | 			s.disks = objAPI.GetDisksID(cache.Disks...) | 
					
						
							|  |  |  | 			if len(s.disks) != len(cache.Disks) { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 				console.Debugf(logPrefix+"Missing disks, want %d, found %d. Cannot heal. %s\n", len(cache.Disks), len(s.disks), logSuffix) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 				s.disks = s.disks[:0] | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 	// Enable healing in XL mode.
 | 
					
						
							|  |  |  | 	if globalIsErasure { | 
					
						
							|  |  |  | 		// Include a clean folder one in n cycles.
 | 
					
						
							| 
									
										
										
										
											2020-09-12 15:08:12 +08:00
										 |  |  | 		s.healFolderInclude = healFolderIncludeProb | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 		// Do a heal check on an object once every n cycles. Must divide into healFolderInclude
 | 
					
						
							| 
									
										
										
										
											2020-09-12 15:08:12 +08:00
										 |  |  | 		s.healObjectSelect = healObjectSelectProb | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	if len(cache.Info.BloomFilter) > 0 { | 
					
						
							|  |  |  | 		s.withFilter = &bloomFilter{BloomFilter: &bloom.BloomFilter{}} | 
					
						
							| 
									
										
										
										
											2020-12-27 14:58:06 +08:00
										 |  |  | 		_, err := s.withFilter.ReadFrom(bytes.NewReader(cache.Info.BloomFilter)) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			logger.LogIf(ctx, err, logPrefix+"Error reading bloom filter") | 
					
						
							|  |  |  | 			s.withFilter = nil | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 	if s.dataUsageScannerDebug { | 
					
						
							|  |  |  | 		console.Debugf(logPrefix+"Start scanning. Bloom filter: %v %s\n", s.withFilter != nil, logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	done := ctx.Done() | 
					
						
							|  |  |  | 	var flattenLevels = 2 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 	if s.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 		console.Debugf(logPrefix+"Cycle: %v, Entries: %v %s\n", cache.Info.NextCycle, len(cache.Cache), logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Always scan flattenLevels deep. Cache root is level 0.
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 	todo := []cachedFolder{{name: cache.Info.Name, objectHealProbDiv: 1}} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	for i := 0; i < flattenLevels; i++ { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 		if s.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 			console.Debugf(logPrefix+"Level %v, scanning %v directories. %s\n", i, len(todo), logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		select { | 
					
						
							|  |  |  | 		case <-done: | 
					
						
							|  |  |  | 			return cache, ctx.Err() | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		var err error | 
					
						
							| 
									
										
										
										
											2021-01-19 18:40:52 +08:00
										 |  |  | 		todo, err = s.scanQueuedLevels(ctx, todo, i == flattenLevels-1, skipHeal) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			// No useful information...
 | 
					
						
							|  |  |  | 			return cache, err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 	if s.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 		console.Debugf(logPrefix+"New folders: %v %s\n", s.newFolders, logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Add new folders first
 | 
					
						
							|  |  |  | 	for _, folder := range s.newFolders { | 
					
						
							|  |  |  | 		select { | 
					
						
							|  |  |  | 		case <-done: | 
					
						
							|  |  |  | 			return s.newCache, ctx.Err() | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-01-19 18:40:52 +08:00
										 |  |  | 		du, err := s.deepScanFolder(ctx, folder, skipHeal) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		if du == nil { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 			console.Debugln(logPrefix + "no disk usage provided" + logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		s.newCache.replace(folder.name, "", *du) | 
					
						
							|  |  |  | 		// Add to parent manually
 | 
					
						
							|  |  |  | 		if folder.parent != nil { | 
					
						
							|  |  |  | 			parent := s.newCache.Cache[folder.parent.Key()] | 
					
						
							|  |  |  | 			parent.addChildString(folder.name) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 	if s.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 		console.Debugf(logPrefix+"Existing folders: %v %s\n", len(s.existingFolders), logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Do selective scanning of existing folders.
 | 
					
						
							|  |  |  | 	for _, folder := range s.existingFolders { | 
					
						
							|  |  |  | 		select { | 
					
						
							|  |  |  | 		case <-done: | 
					
						
							|  |  |  | 			return s.newCache, ctx.Err() | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		h := hashPath(folder.name) | 
					
						
							|  |  |  | 		if !h.mod(s.oldCache.Info.NextCycle, dataUsageUpdateDirCycles) { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 			if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) { | 
					
						
							|  |  |  | 				s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()]) | 
					
						
							|  |  |  | 				continue | 
					
						
							|  |  |  | 			} else { | 
					
						
							|  |  |  | 				folder.objectHealProbDiv = s.healFolderInclude | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			folder.objectHealProbDiv = dataUsageUpdateDirCycles | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		if s.withFilter != nil { | 
					
						
							|  |  |  | 			_, prefix := path2BucketObjectWithBasePath(basePath, folder.name) | 
					
						
							|  |  |  | 			if s.oldCache.Info.lifeCycle == nil || !s.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) { | 
					
						
							|  |  |  | 				// If folder isn't in filter, skip it completely.
 | 
					
						
							|  |  |  | 				if !s.withFilter.containsDir(folder.name) { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 					if !h.mod(s.oldCache.Info.NextCycle, s.healFolderInclude/folder.objectHealProbDiv) { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 						if s.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 							console.Debugf(logPrefix+"Skipping non-updated folder: %v %s\n", folder, logSuffix) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 						} | 
					
						
							|  |  |  | 						s.newCache.replaceHashed(h, folder.parent, s.oldCache.Cache[h.Key()]) | 
					
						
							|  |  |  | 						continue | 
					
						
							|  |  |  | 					} else { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 						if s.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 							console.Debugf(logPrefix+"Adding non-updated folder to heal check: %v %s\n", folder.name, logSuffix) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 						} | 
					
						
							|  |  |  | 						// Update probability of including objects
 | 
					
						
							|  |  |  | 						folder.objectHealProbDiv = s.healFolderInclude | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		// Update on this cycle...
 | 
					
						
							| 
									
										
										
										
											2021-01-19 18:40:52 +08:00
										 |  |  | 		du, err := s.deepScanFolder(ctx, folder, skipHeal) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		if du == nil { | 
					
						
							|  |  |  | 			logger.LogIf(ctx, errors.New("data-usage: no disk usage provided")) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		s.newCache.replaceHashed(h, folder.parent, *du) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 	if s.dataUsageScannerDebug { | 
					
						
							|  |  |  | 		console.Debugf(logPrefix+"Finished scanner, %v entries %s\n", len(s.newCache.Cache), logSuffix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	s.newCache.Info.LastUpdate = UTCNow() | 
					
						
							|  |  |  | 	s.newCache.Info.NextCycle++ | 
					
						
							|  |  |  | 	return s.newCache, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // scanQueuedLevels will scan the provided folders.
 | 
					
						
							|  |  |  | // Files found in the folders will be added to f.newCache.
 | 
					
						
							|  |  |  | // If final is provided folders will be put into f.newFolders or f.existingFolders.
 | 
					
						
							|  |  |  | // If final is not provided the folders found are returned from the function.
 | 
					
						
							| 
									
										
										
										
											2021-01-19 18:40:52 +08:00
										 |  |  | func (f *folderScanner) scanQueuedLevels(ctx context.Context, folders []cachedFolder, final bool, skipHeal bool) ([]cachedFolder, error) { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	var nextFolders []cachedFolder | 
					
						
							|  |  |  | 	done := ctx.Done() | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 	scannerLogPrefix := color.Green("folder-scanner:") | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	for _, folder := range folders { | 
					
						
							|  |  |  | 		select { | 
					
						
							|  |  |  | 		case <-done: | 
					
						
							|  |  |  | 			return nil, ctx.Err() | 
					
						
							|  |  |  | 		default: | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		thisHash := hashPath(folder.name) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 		existing := f.oldCache.findChildrenCopy(thisHash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		// If there are lifecycle rules for the prefix, remove the filter.
 | 
					
						
							|  |  |  | 		filter := f.withFilter | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 		_, prefix := path2BucketObjectWithBasePath(f.root, folder.name) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		var activeLifeCycle *lifecycle.Lifecycle | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, true) { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 			if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 				console.Debugf(scannerLogPrefix+" Prefix %q has active rules\n", prefix) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 			activeLifeCycle = f.oldCache.Info.lifeCycle | 
					
						
							|  |  |  | 			filter = nil | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		if _, ok := f.oldCache.Cache[thisHash.Key()]; filter != nil && ok { | 
					
						
							|  |  |  | 			// If folder isn't in filter and we have data, skip it completely.
 | 
					
						
							|  |  |  | 			if folder.name != dataUsageRoot && !filter.containsDir(folder.name) { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 				if !thisHash.mod(f.oldCache.Info.NextCycle, f.healFolderInclude/folder.objectHealProbDiv) { | 
					
						
							|  |  |  | 					f.newCache.copyWithChildren(&f.oldCache, thisHash, folder.parent) | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 					if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 						console.Debugf(scannerLogPrefix+" Skipping non-updated folder: %v\n", folder.name) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 					continue | 
					
						
							|  |  |  | 				} else { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 					if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 						console.Debugf(scannerLogPrefix+" Adding non-updated folder to heal check: %v\n", folder.name) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 					} | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
										 |  |  | 					// If probability was already scannerHealFolderInclude, keep it.
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 					folder.objectHealProbDiv = f.healFolderInclude | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 		scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		cache := dataUsageEntry{} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		err := readDirFn(path.Join(f.root, folder.name), func(entName string, typ os.FileMode) error { | 
					
						
							|  |  |  | 			// Parse
 | 
					
						
							| 
									
										
										
										
											2021-04-16 07:32:13 +08:00
										 |  |  | 			entName = pathClean(path.Join(folder.name, entName)) | 
					
						
							|  |  |  | 			if entName == "" { | 
					
						
							|  |  |  | 				if f.dataUsageScannerDebug { | 
					
						
							|  |  |  | 					console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 				return errDoneForNow | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			bucket, prefix := path2BucketObjectWithBasePath(f.root, entName) | 
					
						
							|  |  |  | 			if bucket == "" { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 				if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 					console.Debugf(scannerLogPrefix+" no bucket (%s,%s)\n", f.root, entName) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2021-02-18 07:34:42 +08:00
										 |  |  | 				return errDoneForNow | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			if isReservedOrInvalidBucket(bucket, false) { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 				if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 					console.Debugf(scannerLogPrefix+" invalid bucket: %v, entry: %v\n", bucket, entName) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2021-02-18 07:34:42 +08:00
										 |  |  | 				return errDoneForNow | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			select { | 
					
						
							|  |  |  | 			case <-done: | 
					
						
							| 
									
										
										
										
											2021-02-18 07:34:42 +08:00
										 |  |  | 				return errDoneForNow | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			default: | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			if typ&os.ModeDir != 0 { | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 				scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				h := hashPath(entName) | 
					
						
							|  |  |  | 				_, exists := f.oldCache.Cache[h.Key()] | 
					
						
							|  |  |  | 				cache.addChildString(entName) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 				this := cachedFolder{name: entName, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv} | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 				delete(existing, h.Key()) // h.Key() already accounted for.
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				cache.addChild(h) | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				if final { | 
					
						
							|  |  |  | 					if exists { | 
					
						
							|  |  |  | 						f.existingFolders = append(f.existingFolders, this) | 
					
						
							|  |  |  | 					} else { | 
					
						
							|  |  |  | 						f.newFolders = append(f.newFolders, this) | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 				} else { | 
					
						
							|  |  |  | 					nextFolders = append(nextFolders, this) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 				return nil | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-10-23 04:36:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			// Dynamic time delay.
 | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
										 |  |  | 			wait := scannerSleeper.Timer(ctx) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 			// Get file size, ignore errors.
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 			item := scannerItem{ | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				Path:       path.Join(f.root, entName), | 
					
						
							|  |  |  | 				Typ:        typ, | 
					
						
							|  |  |  | 				bucket:     bucket, | 
					
						
							|  |  |  | 				prefix:     path.Dir(prefix), | 
					
						
							|  |  |  | 				objectName: path.Base(entName), | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 				debug:      f.dataUsageScannerDebug, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				lifeCycle:  activeLifeCycle, | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 				heal:       thisHash.mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-01-19 18:40:52 +08:00
										 |  |  | 			// if the drive belongs to an erasure set
 | 
					
						
							|  |  |  | 			// that is already being healed, skip the
 | 
					
						
							|  |  |  | 			// healing attempt on this drive.
 | 
					
						
							|  |  |  | 			item.heal = item.heal && !skipHeal | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 			sizeSummary, err := f.getSize(item) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 			if err == errSkipFile { | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 				wait() // wait to proceed to next entry.
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 				return nil | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 			// successfully read means we have a valid object.
 | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			// Remove filename i.e is the meta file to construct object name
 | 
					
						
							|  |  |  | 			item.transformMetaDir() | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			// Object already accounted for, remove from heal map,
 | 
					
						
							|  |  |  | 			// simply because getSize() function already heals the
 | 
					
						
							|  |  |  | 			// object.
 | 
					
						
							|  |  |  | 			delete(existing, path.Join(item.bucket, item.objectPath())) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | 			cache.addSizes(sizeSummary) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			cache.Objects++ | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | 			cache.ObjSizes.add(sizeSummary.totalSize) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 			wait() // wait to proceed to next entry.
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			return nil | 
					
						
							|  |  |  | 		}) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return nil, err | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 		if f.healObjectSelect == 0 { | 
					
						
							|  |  |  | 			// If we are not scanning, return now.
 | 
					
						
							|  |  |  | 			f.newCache.replaceHashed(thisHash, folder.parent, cache) | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 05:50:33 +08:00
										 |  |  | 		objAPI, ok := newObjectLayerFn().(*erasureServerPools) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 		if !ok || len(f.disks) == 0 { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		bgSeq, found := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) | 
					
						
							|  |  |  | 		if !found { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 		// Whatever remains in 'existing' are folders at this level
 | 
					
						
							|  |  |  | 		// that existed in the previous run but wasn't found now.
 | 
					
						
							|  |  |  | 		//
 | 
					
						
							|  |  |  | 		// This may be because of 2 reasons:
 | 
					
						
							|  |  |  | 		//
 | 
					
						
							|  |  |  | 		// 1) The folder/object was deleted.
 | 
					
						
							|  |  |  | 		// 2) We come from another disk and this disk missed the write.
 | 
					
						
							|  |  |  | 		//
 | 
					
						
							|  |  |  | 		// We therefore perform a heal check.
 | 
					
						
							|  |  |  | 		// If that doesn't bring it back we remove the folder and assume it was deleted.
 | 
					
						
							|  |  |  | 		// This means that the next run will not look for it.
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 		// How to resolve results.
 | 
					
						
							|  |  |  | 		resolver := metadataResolutionParams{ | 
					
						
							|  |  |  | 			dirQuorum: getReadQuorum(len(f.disks)), | 
					
						
							|  |  |  | 			objQuorum: getReadQuorum(len(f.disks)), | 
					
						
							|  |  |  | 			bucket:    "", | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 		healObjectsPrefix := color.Green("healObjects:") | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 		for k := range existing { | 
					
						
							|  |  |  | 			bucket, prefix := path2BucketObject(k) | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 			if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 				console.Debugf(scannerLogPrefix+" checking disappeared folder: %v/%v\n", bucket, prefix) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 			resolver.bucket = bucket | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			foundObjs := false | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 			dangling := false | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 			ctx, cancel := context.WithCancel(ctx) | 
					
						
							| 
									
										
										
										
											2021-03-07 01:25:48 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 			err := listPathRaw(ctx, listPathRawOptions{ | 
					
						
							|  |  |  | 				disks:          f.disks, | 
					
						
							|  |  |  | 				bucket:         bucket, | 
					
						
							|  |  |  | 				path:           prefix, | 
					
						
							|  |  |  | 				recursive:      true, | 
					
						
							|  |  |  | 				reportNotFound: true, | 
					
						
							|  |  |  | 				minDisks:       len(f.disks), // We want full consistency.
 | 
					
						
							|  |  |  | 				// Weird, maybe transient error.
 | 
					
						
							|  |  |  | 				agreed: func(entry metaCacheEntry) { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 					if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 						console.Debugf(healObjectsPrefix+" got agreement: %v\n", entry.name) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 				}, | 
					
						
							|  |  |  | 				// Some disks have data for this.
 | 
					
						
							|  |  |  | 				partial: func(entries metaCacheEntries, nAgreed int, errs []error) { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 					if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 						console.Debugf(healObjectsPrefix+" got partial, %d agreed, errs: %v\n", nAgreed, errs) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					} | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 					// agreed value less than expected quorum
 | 
					
						
							|  |  |  | 					dangling = nAgreed < resolver.objQuorum || nAgreed < resolver.dirQuorum | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					entry, ok := entries.resolve(&resolver) | 
					
						
							|  |  |  | 					if !ok { | 
					
						
							|  |  |  | 						for _, err := range errs { | 
					
						
							|  |  |  | 							if err != nil { | 
					
						
							|  |  |  | 								return | 
					
						
							|  |  |  | 							} | 
					
						
							|  |  |  | 						} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 						// If no errors, queue it for healing.
 | 
					
						
							|  |  |  | 						entry, _ = entries.firstFound() | 
					
						
							|  |  |  | 					} | 
					
						
							| 
									
										
										
										
											2020-10-23 04:36:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 					if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 						console.Debugf(healObjectsPrefix+" resolved to: %v, dir: %v\n", entry.name, entry.isDir()) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					} | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					if entry.isDir() { | 
					
						
							|  |  |  | 						return | 
					
						
							|  |  |  | 					} | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 					// wait on timer per object.
 | 
					
						
							|  |  |  | 					wait := scannerSleeper.Timer(ctx) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					// We got an entry which we should be able to heal.
 | 
					
						
							|  |  |  | 					fiv, err := entry.fileInfoVersions(bucket) | 
					
						
							|  |  |  | 					if err != nil { | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 						wait() | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 						err := bgSeq.queueHealTask(healSource{ | 
					
						
							|  |  |  | 							bucket:    bucket, | 
					
						
							|  |  |  | 							object:    entry.name, | 
					
						
							|  |  |  | 							versionID: "", | 
					
						
							|  |  |  | 						}, madmin.HealItemObject) | 
					
						
							| 
									
										
										
										
											2020-12-25 07:02:02 +08:00
										 |  |  | 						if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { | 
					
						
							|  |  |  | 							logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 						} | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 						foundObjs = foundObjs || err == nil | 
					
						
							|  |  |  | 						return | 
					
						
							|  |  |  | 					} | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					for _, ver := range fiv.Versions { | 
					
						
							|  |  |  | 						// Sleep and reset.
 | 
					
						
							| 
									
										
										
										
											2020-12-05 01:32:35 +08:00
										 |  |  | 						wait() | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
										 |  |  | 						wait = scannerSleeper.Timer(ctx) | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 						err := bgSeq.queueHealTask(healSource{ | 
					
						
							|  |  |  | 							bucket:    bucket, | 
					
						
							|  |  |  | 							object:    fiv.Name, | 
					
						
							|  |  |  | 							versionID: ver.VersionID, | 
					
						
							|  |  |  | 						}, madmin.HealItemObject) | 
					
						
							| 
									
										
										
										
											2020-12-25 07:02:02 +08:00
										 |  |  | 						if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { | 
					
						
							|  |  |  | 							logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 						} | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 						foundObjs = foundObjs || err == nil | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 				}, | 
					
						
							|  |  |  | 				// Too many disks failed.
 | 
					
						
							|  |  |  | 				finished: func(errs []error) { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 					if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 						console.Debugf(healObjectsPrefix+" too many errors: %v\n", errs) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 					cancel() | 
					
						
							|  |  |  | 				}, | 
					
						
							|  |  |  | 			}) | 
					
						
							| 
									
										
										
										
											2020-10-23 04:36:24 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 			if f.dataUsageScannerDebug && err != nil && err != errFileNotFound { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 				console.Debugf(healObjectsPrefix+" checking returned value %v (%T)\n", err, err) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 			// If we found one or more disks with this folder, delete it.
 | 
					
						
							|  |  |  | 			if err == nil && dangling { | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | 				if f.dataUsageScannerDebug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 					console.Debugf(healObjectsPrefix+" deleting dangling directory %s\n", prefix) | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2020-12-28 07:42:20 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 				// wait on timer per object.
 | 
					
						
							|  |  |  | 				wait := scannerSleeper.Timer(ctx) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 				objAPI.HealObjects(ctx, bucket, prefix, madmin.HealOpts{ | 
					
						
							|  |  |  | 					Recursive: true, | 
					
						
							| 
									
										
										
										
											2021-02-19 07:16:20 +08:00
										 |  |  | 					Remove:    healDeleteDangling, | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 				}, | 
					
						
							|  |  |  | 					func(bucket, object, versionID string) error { | 
					
						
							| 
									
										
										
										
											2021-02-18 04:04:11 +08:00
										 |  |  | 						// Wait for each heal as per scanner frequency.
 | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 						wait() | 
					
						
							|  |  |  | 						return bgSeq.queueHealTask(healSource{ | 
					
						
							|  |  |  | 							bucket:    bucket, | 
					
						
							|  |  |  | 							object:    object, | 
					
						
							|  |  |  | 							versionID: versionID, | 
					
						
							|  |  |  | 						}, madmin.HealItemObject) | 
					
						
							|  |  |  | 					}) | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			// Add unless healing returned an error.
 | 
					
						
							| 
									
										
										
										
											2020-12-02 04:07:39 +08:00
										 |  |  | 			if foundObjs { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 				this := cachedFolder{name: k, parent: &thisHash, objectHealProbDiv: folder.objectHealProbDiv} | 
					
						
							|  |  |  | 				cache.addChild(hashPath(k)) | 
					
						
							|  |  |  | 				if final { | 
					
						
							|  |  |  | 					f.existingFolders = append(f.existingFolders, this) | 
					
						
							|  |  |  | 				} else { | 
					
						
							|  |  |  | 					nextFolders = append(nextFolders, this) | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		f.newCache.replaceHashed(thisHash, folder.parent, cache) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return nextFolders, nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// deepScanFolder will deep scan a folder and return the size if no error occurs.
// It recursively walks f.root/folder.name, computing a size summary for every
// regular file via f.getSize and aggregating the results into a fresh
// dataUsageEntry. Throttling is applied per folder and per object through the
// global scannerSleeper. Scanning aborts early with errDoneForNow once ctx is
// done. skipHeal suppresses heal selection for drives whose erasure set is
// already being healed.
func (f *folderScanner) deepScanFolder(ctx context.Context, folder cachedFolder, skipHeal bool) (*dataUsageEntry, error) {
	var cache dataUsageEntry

	done := ctx.Done()

	// addDir is invoked by readDirFn for every entry; it recurses into
	// subdirectories by calling readDirFn again on itself, using dirStack
	// as the mutable path accumulator (pushed before, popped after use).
	var addDir func(entName string, typ os.FileMode) error
	var dirStack = []string{f.root, folder.name}

	deepScannerLogPrefix := color.Green("deep-scanner:")
	addDir = func(entName string, typ os.FileMode) error {
		// Bail out promptly on cancellation without treating it as a scan error
		// of this particular entry.
		select {
		case <-done:
			return errDoneForNow
		default:
		}

		if typ&os.ModeDir != 0 {
			// Descend into the subdirectory, then sleep to throttle
			// per-folder IO before returning the recursion result.
			dirStack = append(dirStack, entName)
			err := readDirFn(path.Join(dirStack...), addDir)
			dirStack = dirStack[:len(dirStack)-1]
			scannerSleeper.Sleep(ctx, dataScannerSleepPerFolder)
			return err
		}

		// Dynamic time delay.
		wait := scannerSleeper.Timer(ctx)

		// Get file size, ignore errors.
		dirStack = append(dirStack, entName)
		fileName := path.Join(dirStack...)
		dirStack = dirStack[:len(dirStack)-1]

		bucket, prefix := path2BucketObjectWithBasePath(f.root, fileName)
		// Only attach a lifecycle config when it has rules that could match
		// this prefix; getSize implementations use it to apply actions.
		var activeLifeCycle *lifecycle.Lifecycle
		if f.oldCache.Info.lifeCycle != nil && f.oldCache.Info.lifeCycle.HasActiveRules(prefix, false) {
			if f.dataUsageScannerDebug {
				console.Debugf(deepScannerLogPrefix+" Prefix %q has active rules\n", prefix)
			}
			activeLifeCycle = f.oldCache.Info.lifeCycle
		}

		item := scannerItem{
			Path:       fileName,
			Typ:        typ,
			bucket:     bucket,
			prefix:     path.Dir(prefix),
			objectName: path.Base(entName),
			debug:      f.dataUsageScannerDebug,
			lifeCycle:  activeLifeCycle,
			// Probabilistic heal selection: the object's hashed path is tested
			// against the current scan cycle so only a healObjectSelect-sized
			// fraction (further divided per folder) is checked each cycle, and
			// only on erasure-coded setups.
			heal: hashPath(path.Join(prefix, entName)).mod(f.oldCache.Info.NextCycle, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure,
		}

		// if the drive belongs to an erasure set
		// that is already being healed, skip the
		// healing attempt on this drive.
		item.heal = item.heal && !skipHeal

		sizeSummary, err := f.getSize(item)
		if err == errSkipFile {
			// Wait to throttle IO
			wait()

			return nil
		}

		// Any other error is logged but the entry is still counted below.
		logger.LogIf(ctx, err)
		cache.addSizes(sizeSummary)
		cache.Objects++
		cache.ObjSizes.add(sizeSummary.totalSize)

		// Wait to throttle IO
		wait()

		return nil
	}
	err := readDirFn(path.Join(dirStack...), addDir)
	if err != nil {
		return nil, err
	}
	return &cache, nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
// scannerItem represents each file while walking.
type scannerItem struct {
	Path string      // Full path of the entry relative to the disk root.
	Typ  os.FileMode // File mode of the entry as reported by the walk.

	bucket     string // Bucket.
	prefix     string // Only the prefix if any, does not have final object name.
	objectName string // Only the object name without prefixes.
	lifeCycle  *lifecycle.Lifecycle // Lifecycle config with active rules for this prefix, nil if none.
	heal       bool // Has the object been selected for heal check?
	debug      bool // Emit scanner debug logging for this item.
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
// sizeSummary carries per-object size totals produced by a getSizeFn during
// scanning, including replication-related byte and version counters that are
// aggregated into the data usage cache (see dataUsageEntry.addSizes).
type sizeSummary struct {
	totalSize      int64
	replicatedSize int64
	pendingSize    int64
	failedSize     int64
	replicaSize    int64
	pendingCount   uint64
	failedCount    uint64
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
// getSizeFn computes the size summary for a single scanned item.
// Returning errSkipFile excludes the item from the aggregated totals.
type getSizeFn func(item scannerItem) (sizeSummary, error)
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // transformMetaDir will transform a directory to prefix/file.ext
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | func (i *scannerItem) transformMetaDir() { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	split := strings.Split(i.prefix, SlashSeparator) | 
					
						
							|  |  |  | 	if len(split) > 1 { | 
					
						
							|  |  |  | 		i.prefix = path.Join(split[:len(split)-1]...) | 
					
						
							|  |  |  | 	} else { | 
					
						
							|  |  |  | 		i.prefix = "" | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// Object name is last element
 | 
					
						
							|  |  |  | 	i.objectName = split[len(split)-1] | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// actionMeta contains information used to apply actions.
type actionMeta struct {
	oi         ObjectInfo // Object/version metadata the action is evaluated against.
	bitRotScan bool       // indicates if bitrot check was requested.
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
// applyActionsLogPrefix is the colored prefix used by the heal/lifecycle
// debug messages emitted from applyHealing and applyLifecycle.
var applyActionsLogPrefix = color.Green("applyActions:")
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-03-31 17:15:08 +08:00
// applyHealing heals the item's object version through the object layer and
// returns the healed object's size. It returns 0 when the object/version no
// longer exists or when healing failed with any error other than
// NotImplemented. A deep (bitrot) scan is requested when meta.bitRotScan is
// set; dangling objects are removed per the global healDeleteDangling setting.
func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, meta actionMeta) (size int64) {
	if i.debug {
		if meta.oi.VersionID != "" {
			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v v(%s)\n", i.bucket, i.objectPath(), meta.oi.VersionID)
		} else {
			console.Debugf(applyActionsLogPrefix+" heal checking: %v/%v\n", i.bucket, i.objectPath())
		}
	}
	healOpts := madmin.HealOpts{Remove: healDeleteDangling}
	if meta.bitRotScan {
		healOpts.ScanMode = madmin.HealDeepScan
	}
	res, err := o.HealObject(ctx, i.bucket, i.objectPath(), meta.oi.VersionID, healOpts)
	// Missing object/version is not an error worth logging; nothing to size.
	if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
		return 0
	}
	// NotImplemented is deliberately ignored (e.g. backends without healing);
	// all other errors are logged and the object contributes no size.
	if err != nil && !errors.Is(err, NotImplemented{}) {
		logger.LogIf(ctx, err)
		return 0
	}
	return res.ObjectSize
}
					
						
							|  |  |  | 
 | 
					
						
// applyLifecycle evaluates the item's lifecycle configuration (if any) and
// applies the resulting action. It returns applied=true when an action was
// carried out, together with the size the object should contribute to usage
// accounting: the actual size when nothing was removed (or for transitions,
// which keep metadata locally), and 0 when the action removed the data.
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, meta actionMeta) (applied bool, size int64) {
	size, err := meta.oi.GetActualSize()
	if i.debug {
		logger.LogIf(ctx, err)
	}
	// No lifecycle configured for this prefix: nothing to apply.
	if i.lifeCycle == nil {
		if i.debug {
			console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
		}
		return false, size
	}

	// First pass: compute the action from the metadata gathered by the scan.
	versionID := meta.oi.VersionID
	action := i.lifeCycle.ComputeAction(
		lifecycle.ObjectOpts{
			Name:                   i.objectPath(),
			UserTags:               meta.oi.UserTags,
			ModTime:                meta.oi.ModTime,
			VersionID:              meta.oi.VersionID,
			DeleteMarker:           meta.oi.DeleteMarker,
			IsLatest:               meta.oi.IsLatest,
			NumVersions:            meta.oi.NumVersions,
			SuccessorModTime:       meta.oi.SuccessorModTime,
			RestoreOngoing:         meta.oi.RestoreOngoing,
			RestoreExpires:         meta.oi.RestoreExpires,
			TransitionStatus:       meta.oi.TransitionStatus,
			RemoteTiersImmediately: globalDebugRemoteTiersImmediately,
		})
	if i.debug {
		if versionID != "" {
			console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, action)
		} else {
			console.Debugf(applyActionsLogPrefix+" lifecycle: %q Initial scan: %v\n", i.objectPath(), action)
		}
	}
	// Only proceed for actions that can mutate the object; anything else
	// (including NoneAction) leaves the object untouched.
	switch action {
	case lifecycle.DeleteAction, lifecycle.DeleteVersionAction:
	case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
	case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
	default:
		// No action.
		if i.debug {
			console.Debugf(applyActionsLogPrefix+" object not expirable: %q\n", i.objectPath())
		}
		return false, size
	}

	// Re-read authoritative metadata from the object layer before acting,
	// so the action is based on consensus rather than a single drive's view.
	obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
		VersionID: versionID,
	})
	if err != nil {
		switch err.(type) {
		case MethodNotAllowed: // This happens usually for a delete marker
			if !obj.DeleteMarker { // if this is not a delete marker log and return
				// Do nothing - heal in the future.
				logger.LogIf(ctx, err)
				return false, size
			}
			// Delete markers fall through: obj is still usable here —
			// presumably GetObjectInfo populates it in this case; verify
			// against the object layer implementation before relying on it.
		case ObjectNotFound, VersionNotFound:
			// object not found or version not found return 0
			return false, 0
		default:
			// All other errors proceed.
			logger.LogIf(ctx, err)
			return false, size
		}
	}

	// Second pass: re-evaluate against the fresh metadata and apply.
	action = evalActionFromLifecycle(ctx, *i.lifeCycle, obj, i.debug)
	if action != lifecycle.NoneAction {
		applied = applyLifecycleAction(ctx, action, o, obj)
	}

	if applied {
		// Transitions keep the object accounted locally at its actual size.
		switch action {
		case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
			return true, size
		}
		// For all other lifecycle actions that remove data
		return true, 0
	}

	return false, size
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // applyActions will apply lifecycle checks on to a scanned item.
 | 
					
						
							|  |  |  | // The resulting size on disk will always be returned.
 | 
					
						
							|  |  |  | // The metadata will be compared to consensus on the object layer before any changes are applied.
 | 
					
						
							|  |  |  | // If no metadata is supplied, -1 is returned if no action is taken.
 | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, meta actionMeta, sizeS *sizeSummary) int64 { | 
					
						
							| 
									
										
										
										
											2021-03-31 17:15:08 +08:00
										 |  |  | 	applied, size := i.applyLifecycle(ctx, o, meta) | 
					
						
							|  |  |  | 	// For instance, an applied lifecycle means we remove/transitioned an object
 | 
					
						
							|  |  |  | 	// from the current deployment, which means we don't have to call healing
 | 
					
						
							|  |  |  | 	// routine even if we are asked to do via heal flag.
 | 
					
						
							| 
									
										
										
										
											2021-04-27 23:24:44 +08:00
										 |  |  | 	if !applied { | 
					
						
							|  |  |  | 		if i.heal { | 
					
						
							|  |  |  | 			size = i.applyHealing(ctx, o, meta) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		// replicate only if lifecycle rules are not applied.
 | 
					
						
							|  |  |  | 		i.healReplication(ctx, o, meta.oi.Clone(), sizeS) | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return size | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, obj ObjectInfo, debug bool) (action lifecycle.Action) { | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 	lcOpts := lifecycle.ObjectOpts{ | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 		Name:                   obj.Name, | 
					
						
							|  |  |  | 		UserTags:               obj.UserTags, | 
					
						
							|  |  |  | 		ModTime:                obj.ModTime, | 
					
						
							|  |  |  | 		VersionID:              obj.VersionID, | 
					
						
							|  |  |  | 		DeleteMarker:           obj.DeleteMarker, | 
					
						
							|  |  |  | 		IsLatest:               obj.IsLatest, | 
					
						
							|  |  |  | 		NumVersions:            obj.NumVersions, | 
					
						
							|  |  |  | 		SuccessorModTime:       obj.SuccessorModTime, | 
					
						
							|  |  |  | 		RestoreOngoing:         obj.RestoreOngoing, | 
					
						
							|  |  |  | 		RestoreExpires:         obj.RestoreExpires, | 
					
						
							|  |  |  | 		TransitionStatus:       obj.TransitionStatus, | 
					
						
							|  |  |  | 		RemoteTiersImmediately: globalDebugRemoteTiersImmediately, | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	action = lc.ComputeAction(lcOpts) | 
					
						
							|  |  |  | 	if debug { | 
					
						
							| 
									
										
										
										
											2020-12-29 17:57:28 +08:00
										 |  |  | 		console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", action) | 
					
						
							| 
									
										
										
										
											2020-08-04 14:04:40 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	if action == lifecycle.NoneAction { | 
					
						
							|  |  |  | 		return action | 
					
						
							| 
									
										
										
										
											2020-08-04 14:04:40 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-07-05 11:56:02 +08:00
										 |  |  | 	switch action { | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 	case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction: | 
					
						
							| 
									
										
										
										
											2020-08-04 14:04:40 +08:00
										 |  |  | 		// Defensive code, should never happen
 | 
					
						
							|  |  |  | 		if obj.VersionID == "" { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 			return lifecycle.NoneAction | 
					
						
							| 
									
										
										
										
											2020-08-04 14:04:40 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 		if rcfg, _ := globalBucketObjectLockSys.Get(obj.Bucket); rcfg.LockEnabled { | 
					
						
							| 
									
										
										
										
											2020-08-04 14:04:40 +08:00
										 |  |  | 			locked := enforceRetentionForDeletion(ctx, obj) | 
					
						
							|  |  |  | 			if locked { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 				if debug { | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 					if obj.VersionID != "" { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 						console.Debugf(applyActionsLogPrefix+" lifecycle: %s v(%s) is locked, not deleting\n", obj.Name, obj.VersionID) | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 					} else { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 						console.Debugf(applyActionsLogPrefix+" lifecycle: %s is locked, not deleting\n", obj.Name) | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 					} | 
					
						
							| 
									
										
										
										
											2020-08-04 14:04:40 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 				return lifecycle.NoneAction | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-07-05 11:56:02 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	return action | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func applyTransitionAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) bool { | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	srcOpts := ObjectOptions{} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	if obj.TransitionStatus == "" { | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 		srcOpts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket) | 
					
						
							|  |  |  | 		srcOpts.VersionID = obj.VersionID | 
					
						
							|  |  |  | 		// mark transition as pending
 | 
					
						
							|  |  |  | 		obj.UserDefined[ReservedMetadataPrefixLower+TransitionStatus] = lifecycle.TransitionPending | 
					
						
							|  |  |  | 		obj.metadataOnly = true // Perform only metadata updates.
 | 
					
						
							|  |  |  | 		if obj.DeleteMarker { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 			return false | 
					
						
							| 
									
										
										
										
											2020-11-13 04:12:09 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	globalTransitionState.queueTransitionTask(obj) | 
					
						
							|  |  |  | 	return true | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject bool) bool { | 
					
						
							|  |  |  | 	lcOpts := lifecycle.ObjectOpts{ | 
					
						
							|  |  |  | 		Name:             obj.Name, | 
					
						
							|  |  |  | 		UserTags:         obj.UserTags, | 
					
						
							|  |  |  | 		ModTime:          obj.ModTime, | 
					
						
							|  |  |  | 		VersionID:        obj.VersionID, | 
					
						
							|  |  |  | 		DeleteMarker:     obj.DeleteMarker, | 
					
						
							|  |  |  | 		IsLatest:         obj.IsLatest, | 
					
						
							|  |  |  | 		NumVersions:      obj.NumVersions, | 
					
						
							|  |  |  | 		SuccessorModTime: obj.SuccessorModTime, | 
					
						
							|  |  |  | 		RestoreOngoing:   obj.RestoreOngoing, | 
					
						
							|  |  |  | 		RestoreExpires:   obj.RestoreExpires, | 
					
						
							|  |  |  | 		TransitionStatus: obj.TransitionStatus, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	action := expireObj | 
					
						
							|  |  |  | 	if restoredObject { | 
					
						
							|  |  |  | 		action = expireRestoredObj | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if err := expireTransitionedObject(ctx, objLayer, obj.Bucket, obj.Name, lcOpts, obj.transitionedObjName, obj.TransitionTier, action); err != nil { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 		if isErrObjectNotFound(err) || isErrVersionNotFound(err) { | 
					
						
							|  |  |  | 			return false | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		logger.LogIf(ctx, err) | 
					
						
							|  |  |  | 		return false | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-04-20 01:30:42 +08:00
										 |  |  | 	// Notification already sent in *expireTransitionedObject*, just return 'true' here.
 | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	return true | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-07 08:10:33 +08:00
										 |  |  | func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, applyOnVersion bool) bool { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	opts := ObjectOptions{} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-02-07 08:10:33 +08:00
										 |  |  | 	if applyOnVersion { | 
					
						
							|  |  |  | 		opts.VersionID = obj.VersionID | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	if opts.VersionID == "" { | 
					
						
							|  |  |  | 		opts.Versioned = globalBucketVersioningSys.Enabled(obj.Bucket) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	obj, err := objLayer.DeleteObject(ctx, obj.Bucket, obj.Name, opts) | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2021-01-18 05:58:41 +08:00
										 |  |  | 		if isErrObjectNotFound(err) || isErrVersionNotFound(err) { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 			return false | 
					
						
							| 
									
										
										
										
											2021-01-18 05:58:41 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 		// Assume it is still there.
 | 
					
						
							|  |  |  | 		logger.LogIf(ctx, err) | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 		return false | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-24 00:51:12 +08:00
										 |  |  | 	// Send audit for the lifecycle delete operation
 | 
					
						
							|  |  |  | 	auditLogLifecycle(ctx, obj.Bucket, obj.Name) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-10-17 12:22:12 +08:00
										 |  |  | 	eventName := event.ObjectRemovedDelete | 
					
						
							|  |  |  | 	if obj.DeleteMarker { | 
					
						
							|  |  |  | 		eventName = event.ObjectRemovedDeleteMarkerCreated | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	// Notify object deleted event.
 | 
					
						
							|  |  |  | 	sendEvent(eventArgs{ | 
					
						
							| 
									
										
										
										
											2020-10-17 12:22:12 +08:00
										 |  |  | 		EventName:  eventName, | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 		BucketName: obj.Bucket, | 
					
						
							| 
									
										
										
										
											2020-06-13 11:04:01 +08:00
										 |  |  | 		Object:     obj, | 
					
						
							|  |  |  | 		Host:       "Internal: [ILM-EXPIRY]", | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	}) | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | 	return true | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Apply object, object version, restored object or restored object version action on the given object
 | 
					
						
							| 
									
										
										
										
											2021-02-07 08:10:33 +08:00
										 |  |  | func applyExpiryRule(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, restoredObject, applyOnVersion bool) bool { | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	if obj.TransitionStatus != "" { | 
					
						
							|  |  |  | 		return applyExpiryOnTransitionedObject(ctx, objLayer, obj, restoredObject) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-02-07 08:10:33 +08:00
										 |  |  | 	return applyExpiryOnNonTransitionedObjects(ctx, objLayer, obj, applyOnVersion) | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-03-06 06:15:53 +08:00
										 |  |  | // Perform actions (removal or transitioning of objects), return true the action is successfully performed
 | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | func applyLifecycleAction(ctx context.Context, action lifecycle.Action, objLayer ObjectLayer, obj ObjectInfo) (success bool) { | 
					
						
							|  |  |  | 	switch action { | 
					
						
							|  |  |  | 	case lifecycle.DeleteVersionAction, lifecycle.DeleteAction: | 
					
						
							| 
									
										
										
										
											2021-02-07 08:10:33 +08:00
										 |  |  | 		success = applyExpiryRule(ctx, objLayer, obj, false, action == lifecycle.DeleteVersionAction) | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction: | 
					
						
							| 
									
										
										
										
											2021-02-07 08:10:33 +08:00
										 |  |  | 		success = applyExpiryRule(ctx, objLayer, obj, true, action == lifecycle.DeleteRestoredVersionAction) | 
					
						
							| 
									
										
										
										
											2021-02-02 01:52:11 +08:00
										 |  |  | 	case lifecycle.TransitionAction, lifecycle.TransitionVersionAction: | 
					
						
							|  |  |  | 		success = applyTransitionAction(ctx, action, objLayer, obj) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // objectPath returns the prefix and object name.
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | func (i *scannerItem) objectPath() string { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	return path.Join(i.prefix, i.objectName) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-07-22 08:49:56 +08:00
										 |  |  | // healReplication will heal a scanned item that has failed replication.
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) { | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() { | 
					
						
							| 
									
										
										
										
											2020-12-14 04:05:54 +08:00
										 |  |  | 		// heal delete marker replication failure or versioned delete replication failure
 | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 		if oi.ReplicationStatus == replication.Pending || | 
					
						
							|  |  |  | 			oi.ReplicationStatus == replication.Failed || | 
					
						
							|  |  |  | 			oi.VersionPurgeStatus == Failed || oi.VersionPurgeStatus == Pending { | 
					
						
							|  |  |  | 			i.healReplicationDeletes(ctx, o, oi) | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 			return | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 	switch oi.ReplicationStatus { | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | 	case replication.Pending: | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 		sizeS.pendingCount++ | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 		sizeS.pendingSize += oi.Size | 
					
						
							| 
									
										
										
										
											2021-04-30 09:20:39 +08:00
										 |  |  | 		globalReplicationPool.queueReplicaTask(ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType}) | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | 	case replication.Failed: | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 		sizeS.failedSize += oi.Size | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 		sizeS.failedCount++ | 
					
						
							| 
									
										
										
										
											2021-04-30 09:20:39 +08:00
										 |  |  | 		globalReplicationPool.queueReplicaTask(ReplicateObjectInfo{ObjectInfo: oi, OpType: replication.HealReplicationType}) | 
					
						
							| 
									
										
										
										
											2021-01-14 03:52:28 +08:00
										 |  |  | 	case replication.Completed, "COMPLETE": | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 		sizeS.replicatedSize += oi.Size | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | 	case replication.Replica: | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 		sizeS.replicaSize += oi.Size | 
					
						
							| 
									
										
										
										
											2020-07-22 08:49:56 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // healReplicationDeletes will heal a scanned deleted item that failed to replicate deletes.
 | 
					
						
							| 
									
										
										
										
											2021-02-27 07:11:42 +08:00
										 |  |  | func (i *scannerItem) healReplicationDeletes(ctx context.Context, o ObjectLayer, oi ObjectInfo) { | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 	// handle soft delete and permanent delete failures here.
 | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 	if oi.DeleteMarker || !oi.VersionPurgeStatus.Empty() { | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 		versionID := "" | 
					
						
							|  |  |  | 		dmVersionID := "" | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 		if oi.VersionPurgeStatus.Empty() { | 
					
						
							|  |  |  | 			dmVersionID = oi.VersionID | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 		} else { | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 			versionID = oi.VersionID | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-04-30 09:20:39 +08:00
										 |  |  | 		globalReplicationPool.queueReplicaDeleteTask(DeletedObjectVersionInfo{ | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 			DeletedObject: DeletedObject{ | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 				ObjectName:                    oi.Name, | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 				DeleteMarkerVersionID:         dmVersionID, | 
					
						
							|  |  |  | 				VersionID:                     versionID, | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 				DeleteMarkerReplicationStatus: string(oi.ReplicationStatus), | 
					
						
							|  |  |  | 				DeleteMarkerMTime:             DeleteMarkerMTime{oi.ModTime}, | 
					
						
							|  |  |  | 				DeleteMarker:                  oi.DeleteMarker, | 
					
						
							|  |  |  | 				VersionPurgeStatus:            oi.VersionPurgeStatus, | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 			}, | 
					
						
							| 
									
										
										
										
											2020-12-29 02:31:00 +08:00
										 |  |  | 			Bucket: oi.Bucket, | 
					
						
							| 
									
										
										
										
											2020-11-20 10:43:58 +08:00
										 |  |  | 		}) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2020-12-05 01:32:35 +08:00
										 |  |  | 
 | 
					
						
// dynamicSleeper scales sleeps by a runtime-adjustable factor.
// All settings are guarded by mu; waiters re-evaluate their sleep when
// the settings change (signalled through 'cycle').
type dynamicSleeper struct {
	mu sync.RWMutex

	// Sleep factor: elapsed/base time is multiplied by this to get the
	// actual sleep duration.
	factor float64

	// maximum sleep cap,
	// set to <= 0 to disable.
	maxSleep time.Duration

	// Don't sleep at all, if time taken is below this value.
	// This is to avoid too small costly sleeps.
	minSleep time.Duration

	// cycle will be closed (and replaced) when settings are updated,
	// waking every in-flight sleeper so it re-reads the new settings.
	cycle chan struct{}
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // newDynamicSleeper
 | 
					
						
							|  |  |  | func newDynamicSleeper(factor float64, maxWait time.Duration) *dynamicSleeper { | 
					
						
							|  |  |  | 	return &dynamicSleeper{ | 
					
						
							|  |  |  | 		factor:   factor, | 
					
						
							|  |  |  | 		cycle:    make(chan struct{}), | 
					
						
							|  |  |  | 		maxSleep: maxWait, | 
					
						
							|  |  |  | 		minSleep: 100 * time.Microsecond, | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// Timer returns a timer that has started.
// When the returned function is called it will wait a duration
// proportional (by the current factor) to the time elapsed between the
// Timer call and the invocation of the returned function.
func (d *dynamicSleeper) Timer(ctx context.Context) func() {
	t := time.Now()
	return func() {
		doneAt := time.Now()
		for {
			// Grab current values under the read lock; they may change
			// between iterations via Update.
			d.mu.RLock()
			minWait, maxWait := d.minSleep, d.maxSleep
			factor := d.factor
			cycle := d.cycle
			d.mu.RUnlock()
			elapsed := doneAt.Sub(t)
			// Don't sleep for really small amount of time
			wantSleep := time.Duration(float64(elapsed) * factor)
			if wantSleep <= minWait {
				return
			}
			if maxWait > 0 && wantSleep > maxWait {
				wantSleep = maxWait
			}
			timer := time.NewTimer(wantSleep)
			select {
			case <-ctx.Done():
				// Canceled: stop the timer and drain its channel if it
				// already fired, so the timer can be collected cleanly.
				if !timer.Stop() {
					<-timer.C
				}
				return
			case <-timer.C:
				// Slept the full duration.
				return
			case <-cycle:
				// Settings were updated (cycle closed). If the timer fired
				// concurrently we are done; otherwise loop and recompute
				// the FULL sleep with the new settings (not the remainder).
				if !timer.Stop() {
					// We expired.
					<-timer.C
					return
				}
			}
		}
	}
}
					
						
							|  |  |  | 
 | 
					
						
// Sleep sleeps the specified time multiplied by the sleep factor.
// If the factor is updated the sleep will be done again with the new factor.
func (d *dynamicSleeper) Sleep(ctx context.Context, base time.Duration) {
	for {
		// Grab current values under the read lock; they may change
		// between iterations via Update.
		d.mu.RLock()
		minWait, maxWait := d.minSleep, d.maxSleep
		factor := d.factor
		cycle := d.cycle
		d.mu.RUnlock()
		// Don't sleep for really small amount of time
		wantSleep := time.Duration(float64(base) * factor)
		if wantSleep <= minWait {
			return
		}
		if maxWait > 0 && wantSleep > maxWait {
			wantSleep = maxWait
		}
		timer := time.NewTimer(wantSleep)
		select {
		case <-ctx.Done():
			// Canceled: stop and drain the timer if it already fired.
			if !timer.Stop() {
				<-timer.C
			}
			return
		case <-timer.C:
			// Slept the full duration.
			return
		case <-cycle:
			// Settings were updated (cycle closed). If the timer fired
			// concurrently we are done; otherwise loop and redo the FULL
			// sleep with the new settings.
			if !timer.Stop() {
				// We expired.
				<-timer.C
				return
			}
		}
	}
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Update the current settings and cycle all waiting.
 | 
					
						
							|  |  |  | // Parameters are the same as in the contructor.
 | 
					
						
							|  |  |  | func (d *dynamicSleeper) Update(factor float64, maxWait time.Duration) error { | 
					
						
							|  |  |  | 	d.mu.Lock() | 
					
						
							|  |  |  | 	defer d.mu.Unlock() | 
					
						
							|  |  |  | 	if math.Abs(d.factor-factor) < 1e-10 && d.maxSleep == maxWait { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	// Update values and cycle waiting.
 | 
					
						
							|  |  |  | 	close(d.cycle) | 
					
						
							|  |  |  | 	d.factor = factor | 
					
						
							|  |  |  | 	d.maxSleep = maxWait | 
					
						
							|  |  |  | 	d.cycle = make(chan struct{}) | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							| 
									
										
										
										
											2021-04-24 00:51:12 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | func auditLogLifecycle(ctx context.Context, bucket, object string) { | 
					
						
							|  |  |  | 	entry := audit.NewEntry(globalDeploymentID) | 
					
						
							|  |  |  | 	entry.Trigger = "internal-scanner" | 
					
						
							|  |  |  | 	entry.API.Name = "DeleteObject" | 
					
						
							|  |  |  | 	entry.API.Bucket = bucket | 
					
						
							|  |  |  | 	entry.API.Object = object | 
					
						
							|  |  |  | 	ctx = logger.SetAuditEntry(ctx, &entry) | 
					
						
							|  |  |  | 	logger.AuditLog(ctx, nil, nil, nil) | 
					
						
							|  |  |  | } |