// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"os"
	"runtime"
	"sort"
	"sync"
	"time"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/dsync"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/pkg/v3/sync/errgroup"
)

// list all errors that can be ignored in a bucket operation.
var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// list all errors that can be ignored in a bucket metadata operation.
var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

// OfflineDisk represents an unavailable disk.
var OfflineDisk StorageAPI // zero value is nil

// erasureObjects - Implements ER object layer.
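// Each erasureObjects instance manages a single erasure set, identified by
// poolIndex and setIndex, over the drives returned by getDisks.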
type erasureObjects struct {
	setDriveCount      int
	defaultParityCount int

	setIndex  int
	poolIndex int

	// getDisks returns list of storageAPIs.
	getDisks func() []StorageAPI

	// getLockers returns list of remote and local lockers.
	getLockers func() ([]dsync.NetLocker, string)

	// getEndpoints returns list of endpoints belonging to this set.
	// some may be local and some remote.
	getEndpoints func() []Endpoint

	// getEndpointStrings returns list of endpoint strings belonging to this set.
	// some may be local and some remote.
	getEndpointStrings func() []string

	// Locker mutex map.
	nsMutex *nsLockMap
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (er erasureObjects) NewNSLock(bucket string, objects ...string) RWLocker {
	return er.nsMutex.NewNSLock(er.getLockers, bucket, objects...)
}

// Shutdown function for object storage interface.
func (er erasureObjects) Shutdown(ctx context.Context) error {
	// Add any object layer shutdown activities here.
	closeStorageDisks(er.getDisks()...)
	return nil
}

// defaultWQuorum write quorum based on setDriveCount and defaultParityCount
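// For example, with setDriveCount=12 and defaultParityCount=4 the write quorum
// is 12-4=8 drives. When data and parity counts are equal (e.g. parity 6 on 12
// drives) one extra drive is required, giving a quorum of 7, so two conflicting
// halves of the set can never both reach write quorum.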
func (er erasureObjects) defaultWQuorum() int {
	dataCount := er.setDriveCount - er.defaultParityCount
	if dataCount == er.defaultParityCount {
		return dataCount + 1
	}
	return dataCount
}

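// diskErrToDriveState maps a storage error to the corresponding madmin drive
// state string reported by health and info APIs.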
func diskErrToDriveState(err error) (state string) {
	switch {
	case errors.Is(err, errDiskNotFound) || errors.Is(err, context.DeadlineExceeded):
		state = madmin.DriveStateOffline
	case errors.Is(err, errCorruptedFormat) || errors.Is(err, errCorruptedBackend):
		state = madmin.DriveStateCorrupt
	case errors.Is(err, errUnformattedDisk):
		state = madmin.DriveStateUnformatted
	case errors.Is(err, errDiskAccessDenied):
		state = madmin.DriveStatePermission
	case errors.Is(err, errFaultyDisk):
		state = madmin.DriveStateFaulty
	case err == nil:
		state = madmin.DriveStateOk
	default:
		state = fmt.Sprintf("%s (cause: %s)", madmin.DriveStateUnknown, err)
	}

	return
}

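// getOnlineOfflineDisksStats aggregates per-endpoint online and offline drive
// counts from the collected disk info.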
func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) {
	onlineDisks = make(madmin.BackendDisks)
	offlineDisks = make(madmin.BackendDisks)

	for _, disk := range disksInfo {
		ep := disk.Endpoint
		if _, ok := offlineDisks[ep]; !ok {
			offlineDisks[ep] = 0
		}
		if _, ok := onlineDisks[ep]; !ok {
			onlineDisks[ep] = 0
		}
	}

	// Bucket drives into online/offline counts per endpoint.
	for _, disk := range disksInfo {
		ep := disk.Endpoint
		state := disk.State
		if state != madmin.DriveStateOk && state != madmin.DriveStateUnformatted {
			offlineDisks[ep]++
			continue
		}
		onlineDisks[ep]++
	}

	rootDiskCount := 0
	for _, di := range disksInfo {
		if di.RootDisk {
			rootDiskCount++
		}
	}

	// Count offline disks as well to ensure consistent
	// reportability of offline drives on local setups.
	if len(disksInfo) == (rootDiskCount + offlineDisks.Sum()) {
		// Success.
		return onlineDisks, offlineDisks
	}

	// Root disk should be considered offline
	for i := range disksInfo {
		ep := disksInfo[i].Endpoint
		if disksInfo[i].RootDisk {
			offlineDisks[ep]++
			onlineDisks[ep]--
		}
	}

	return onlineDisks, offlineDisks
}

// getDisksInfo - fetch disks info across all storage APIs.
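// Per-drive errors are not returned to the caller; they are folded into each
// drive's State via diskErrToDriveState, so a single bad drive does not fail
// the call.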
func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disksInfo []madmin.Disk) {
	disksInfo = make([]madmin.Disk, len(disks))

	g := errgroup.WithNErrs(len(disks))
	for index := range disks {
		index := index
		g.Go(func() error {
			di := madmin.Disk{
				Endpoint:  endpoints[index].String(),
				PoolIndex: endpoints[index].PoolIdx,
				SetIndex:  endpoints[index].SetIdx,
				DiskIndex: endpoints[index].DiskIdx,
				Local:     endpoints[index].IsLocal,
			}
			if disks[index] == OfflineDisk {
				di.State = diskErrToDriveState(errDiskNotFound)
				disksInfo[index] = di
				return nil
			}
			info, err := disks[index].DiskInfo(context.TODO(), DiskInfoOptions{Metrics: metrics})
			di.DrivePath = info.MountPath
			di.TotalSpace = info.Total
			di.UsedSpace = info.Used
			di.AvailableSpace = info.Free
			di.UUID = info.ID
			di.Major = info.Major
			di.Minor = info.Minor
			di.RootDisk = info.RootDisk
			di.Healing = info.Healing
			di.Scanning = info.Scanning
			di.State = diskErrToDriveState(err)
			di.FreeInodes = info.FreeInodes
			di.UsedInodes = info.UsedInodes
			if info.Healing {
				if hi := disks[index].Healing(); hi != nil {
					hd := hi.toHealingDisk()
					di.HealInfo = &hd
				}
			}
			di.Metrics = &madmin.DiskMetrics{
				LastMinute:              make(map[string]madmin.TimedAction, len(info.Metrics.LastMinute)),
				APICalls:                make(map[string]uint64, len(info.Metrics.APICalls)),
				TotalErrorsAvailability: info.Metrics.TotalErrorsAvailability,
				TotalErrorsTimeout:      info.Metrics.TotalErrorsTimeout,
				TotalWaiting:            info.Metrics.TotalWaiting,
			}
			for k, v := range info.Metrics.LastMinute {
				if v.N > 0 {
					di.Metrics.LastMinute[k] = v.asTimedAction()
				}
			}
			for k, v := range info.Metrics.APICalls {
				di.Metrics.APICalls[k] = v
			}
			if info.Total > 0 {
				// Convert to float before dividing so the percentage is not
				// truncated by integer division.
				di.Utilization = float64(info.Used) / float64(info.Total) * 100
			}
			disksInfo[index] = di
			return nil
		}, index)
	}

	g.Wait()
	return disksInfo
}

// Get an aggregated storage info across all disks.
func getStorageInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) StorageInfo {
	disksInfo := getDisksInfo(disks, endpoints, metrics)

	// Sort so that the first element is the smallest.
	sort.Slice(disksInfo, func(i, j int) bool {
		return disksInfo[i].TotalSpace < disksInfo[j].TotalSpace
	})

	storageInfo := StorageInfo{
		Disks: disksInfo,
	}

	storageInfo.Backend.Type = madmin.Erasure
	return storageInfo
}

// StorageInfo - returns underlying storage statistics.
func (er erasureObjects) StorageInfo(ctx context.Context) StorageInfo {
	disks := er.getDisks()
	endpoints := er.getEndpoints()
	return getStorageInfo(disks, endpoints, true)
}

// LocalStorageInfo - returns underlying local storage statistics.
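// Only drives whose endpoint is local to this server are queried.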
func (er erasureObjects) LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo {
	disks := er.getDisks()
	endpoints := er.getEndpoints()

	var localDisks []StorageAPI
	var localEndpoints []Endpoint

	for i, endpoint := range endpoints {
		if endpoint.IsLocal {
			localDisks = append(localDisks, disks[i])
			localEndpoints = append(localEndpoints, endpoint)
		}
	}

	return getStorageInfo(localDisks, localEndpoints, metrics)
}

// getOnlineDisksWithHealingAndInfo - returns online disks and overall healing status.
// Disks are ordered in the following groups:
// - Online disks that are neither scanning nor healing
// - Scanning disks
// - Healing disks (if inclHealing is true)
func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (newDisks []StorageAPI, newInfos []DiskInfo, healing int) {
	var wg sync.WaitGroup
	disks := er.getDisks()
	infos := make([]DiskInfo, len(disks))
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for _, i := range r.Perm(len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()

			disk := disks[i]
			if disk == nil {
				infos[i].Error = errDiskNotFound.Error()
				return
			}

			di, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
			infos[i] = di
			if err != nil {
				// - Do not consume disks which are not reachable,
				//   unformatted or simply not accessible for some reason.
				infos[i].Error = err.Error()
			}
		}()
	}
	wg.Wait()

	var scanningDisks, healingDisks []StorageAPI
	var scanningInfos, healingInfos []DiskInfo

	for i, info := range infos {
		// Check if one of the drives in the set is being healed.
		// This information is used by the scanner to skip healing
		// this erasure set while it calculates the usage.
		if info.Error != "" || disks[i] == nil {
			continue
		}
		if info.Healing {
			healing++
			if inclHealing {
				healingDisks = append(healingDisks, disks[i])
				healingInfos = append(healingInfos, infos[i])
			}
			continue
		}

		if !info.Scanning {
			newDisks = append(newDisks, disks[i])
			newInfos = append(newInfos, infos[i])
		} else {
			scanningDisks = append(scanningDisks, disks[i])
			scanningInfos = append(scanningInfos, infos[i])
		}
	}

	// Prefer non-scanning disks over disks which are currently being scanned.
	newDisks = append(newDisks, scanningDisks...)
	newInfos = append(newInfos, scanningInfos...)

	// Then add healing disks.
	newDisks = append(newDisks, healingDisks...)
	newInfos = append(newInfos, healingInfos...)

	return newDisks, newInfos, healing
}

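// getOnlineDisksWithHealing returns the online disks and whether any drive in
// the set is currently healing.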
func (er erasureObjects) getOnlineDisksWithHealing(inclHealing bool) ([]StorageAPI, bool) {
	newDisks, _, healing := er.getOnlineDisksWithHealingAndInfo(inclHealing)
	return newDisks, healing > 0
}

// Clean-up previously deleted objects from .minio.sys/tmp/.trash/
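// Each local drive is processed in its own goroutine; every entry under the
// trash directory is removed inside the drive deadline worker, paced by
// deleteCleanupSleeper.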
func (er erasureObjects) cleanupDeletedObjects(ctx context.Context) {
	var wg sync.WaitGroup
	for _, disk := range er.getLocalDisks() {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(disk StorageAPI) {
			defer wg.Done()
			drivePath := disk.Endpoint().Path
			readDirFn(pathJoin(drivePath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error {
				w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
				return w.Run(func() error {
					wait := deleteCleanupSleeper.Timer(ctx)
					removeAll(pathJoin(drivePath, minioMetaTmpDeletedBucket, ddir))
					wait()
					return nil
				})
			})
		}(disk)
	}
	wg.Wait()
}

// nsScanner will start scanning buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
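// One scanner goroutine is started per usable disk (capped at GOMAXPROCS),
// while a single collector goroutine owns the cache, merges per-bucket results
// and periodically persists and publishes snapshots.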
func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wantCycle uint32, updates chan<- dataUsageCache, healScanMode madmin.HealScanMode) error {
	if len(buckets) == 0 {
		return nil
	}

	// Collect disks we can use.
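	// Healing drives are excluded here; the healing flag is still recorded so
	// the per-bucket caches below can mark SkipHealing for this set.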
	disks, healing := er.getOnlineDisksWithHealing(false)
	if len(disks) == 0 {
		scannerLogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle"))
		return nil
	}

	// Load bucket totals
	oldCache := dataUsageCache{}
	if err := oldCache.load(ctx, er, dataUsageCacheName); err != nil {
		return err
	}

	// New cache..
	cache := dataUsageCache{
		Info: dataUsageCacheInfo{
			Name:      dataUsageRoot,
			NextCycle: oldCache.Info.NextCycle,
		},
		Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)),
	}

	// Put all buckets into channel.
	bucketCh := make(chan BucketInfo, len(buckets))

	// Shuffle buckets so that the same set of buckets is not always scanned in
	// the same order across erasure sets at any given point in time. This wider
	// spread is needed when there are lots of buckets with different object
	// layouts.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	permutes := r.Perm(len(buckets))
	// Add new buckets first
	for _, idx := range permutes {
		b := buckets[idx]
		if e := oldCache.find(b.Name); e == nil {
			bucketCh <- b
		}
	}
	for _, idx := range permutes {
		b := buckets[idx]
		if e := oldCache.find(b.Name); e != nil {
			cache.replace(b.Name, dataUsageRoot, *e)
			bucketCh <- b
		}
	}
	xioutil.SafeClose(bucketCh)

	bucketResults := make(chan dataUsageEntryInfo, len(disks))

	// Start async collector/saver.
	// This goroutine owns the cache.
	var saverWg sync.WaitGroup
	saverWg.Add(1)
	go func() {
		// Add jitter to the update time so multiple sets don't sync up.
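		// The resulting interval is uniformly distributed in [30s, 40s).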
		updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
		t := time.NewTicker(updateTime)
		defer t.Stop()
		defer saverWg.Done()
		var lastSave time.Time

		for {
			select {
			case <-t.C:
				if cache.Info.LastUpdate.Equal(lastSave) {
					continue
				}
				scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update")
				updates <- cache.clone()

				lastSave = cache.Info.LastUpdate
			case v, ok := <-bucketResults:
				if !ok {
					// Save final state...
					cache.Info.NextCycle = wantCycle
					cache.Info.LastUpdate = time.Now()
					scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed")
					updates <- cache.clone()
					return
				}
				cache.replace(v.Name, v.Parent, v.Entry)
				cache.Info.LastUpdate = time.Now()
			}
		}
	}()

	// Restrict parallelism for the disk usage scanner
	// up to GOMAXPROCS if GOMAXPROCS is < len(disks)
	maxProcs := runtime.GOMAXPROCS(0)
	if maxProcs < len(disks) {
		disks = disks[:maxProcs]
	}

	// Start one scanner per disk
	var wg sync.WaitGroup
	wg.Add(len(disks))

	for i := range disks {
		go func(i int) {
			defer wg.Done()
			disk := disks[i]

			for bucket := range bucketCh {
				select {
				case <-ctx.Done():
					return
				default:
				}

				// Load cache for bucket
				cacheName := pathJoin(bucket.Name, dataUsageCacheName)
				cache := dataUsageCache{}
				scannerLogIf(ctx, cache.load(ctx, er, cacheName))
				if cache.Info.Name == "" {
					cache.Info.Name = bucket.Name
				}
				cache.Info.SkipHealing = healing
				cache.Info.NextCycle = wantCycle
				if cache.Info.Name != bucket.Name {
					cache.Info = dataUsageCacheInfo{
						Name:       bucket.Name,
						LastUpdate: time.Time{},
						NextCycle:  wantCycle,
					}
				}
				// Collect updates.
				updates := make(chan dataUsageEntry, 1)
				var wg sync.WaitGroup
				wg.Add(1)
				go func(name string) {
					defer wg.Done()
					for update := range updates {
						select {
						case <-ctx.Done():
						case bucketResults <- dataUsageEntryInfo{
							Name:   name,
							Parent: dataUsageRoot,
							Entry:  update,
						}:
						}
					}
				}(cache.Info.Name)
				// Calc usage
				before := cache.Info.LastUpdate
				var err error
				cache, err = disk.NSScanner(ctx, cache, updates, healScanMode, nil)
				if err != nil {
					if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) {
						scannerLogIf(ctx, cache.save(ctx, er, cacheName))
					} else {
						scannerLogIf(ctx, err)
					}
					// This ensures that we don't close
					// bucketResults channel while the
					// updates-collector goroutine still
					// holds a reference to this.
					wg.Wait()
					continue
				}

				wg.Wait()
				// Flatten for upstream, but save full state.
				var root dataUsageEntry
				if r := cache.root(); r != nil {
					root = cache.flatten(*r)
					if root.ReplicationStats.empty() {
						root.ReplicationStats = nil
					}
				}
				select {
				case <-ctx.Done():
					return
				case bucketResults <- dataUsageEntryInfo{
					Name:   cache.Info.Name,
					Parent: dataUsageRoot,
					Entry:  root,
				}:
				}

				// Save cache
				scannerLogIf(ctx, cache.save(ctx, er, cacheName))
			}
		}(i)
	}
	wg.Wait()
	xioutil.SafeClose(bucketResults)
	saverWg.Wait()

	return nil
}