| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
										 |  |  | // Copyright (c) 2015-2023 MinIO, Inc.
 | 
					
						
							| 
									
										
										
										
											2021-04-19 03:41:13 +08:00
										 |  |  | //
 | 
					
						
							|  |  |  | // This file is part of MinIO Object Storage stack
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is free software: you can redistribute it and/or modify
 | 
					
						
							|  |  |  | // it under the terms of the GNU Affero General Public License as published by
 | 
					
						
							|  |  |  | // the Free Software Foundation, either version 3 of the License, or
 | 
					
						
							|  |  |  | // (at your option) any later version.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is distributed in the hope that it will be useful
 | 
					
						
							|  |  |  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
					
						
							|  |  |  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
					
						
							|  |  |  | // GNU Affero General Public License for more details.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // You should have received a copy of the GNU Affero General Public License
 | 
					
						
							|  |  |  | // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | package cmd | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							|  |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"fmt" | 
					
						
							|  |  |  | 	"io" | 
					
						
							| 
									
										
										
										
											2023-03-10 07:15:46 +08:00
										 |  |  | 	"math/rand" | 
					
						
							| 
									
										
										
										
											2021-01-01 01:45:09 +08:00
										 |  |  | 	"net/http" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"path" | 
					
						
							|  |  |  | 	"path/filepath" | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	"sort" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"strings" | 
					
						
							|  |  |  | 	"time" | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	"github.com/cespare/xxhash/v2" | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
										 |  |  | 	"github.com/dustin/go-humanize" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"github.com/klauspost/compress/zstd" | 
					
						
							| 
									
										
										
										
											2023-06-20 08:53:08 +08:00
										 |  |  | 	"github.com/minio/madmin-go/v3" | 
					
						
							| 
									
										
										
										
											2021-06-02 05:59:40 +08:00
										 |  |  | 	"github.com/minio/minio/internal/bucket/lifecycle" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"github.com/tinylib/msgp/msgp" | 
					
						
							| 
									
										
										
										
											2023-09-15 02:53:52 +08:00
										 |  |  | 	"github.com/valyala/bytebufferpool" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-10-22 23:30:50 +08:00
//msgp:clearomitted

//go:generate msgp -file $GOFILE -unexported

// dataUsageHash is the hash type used to key entries in the data usage cache.
type dataUsageHash string

// sizeHistogramV1 is size histogram V1, which has fewer intervals esp. between
// 1024B and 1MiB. Kept for decoding older on-disk caches.
type sizeHistogramV1 [dataUsageBucketLenV1]uint64

// sizeHistogram is a size histogram.
// Each bucket counts objects falling into one size interval.
type sizeHistogram [dataUsageBucketLen]uint64

// versionsHistogram is a histogram of number of versions in an object.
type versionsHistogram [dataUsageVersionLen]uint64
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// dataUsageEntry holds the aggregated usage counters for a single path in
// the data usage cache, plus the hashes of its direct children.
// Field tags define the msgp wire format; do not reorder or rename them.
type dataUsageEntry struct {
	Children dataUsageHashMap `msg:"ch"`
	// These fields do not include any children.
	Size          int64             `msg:"sz"`
	Objects       uint64            `msg:"os"`
	Versions      uint64            `msg:"vs"` // Versions that are not delete markers.
	DeleteMarkers uint64            `msg:"dms"`
	ObjSizes      sizeHistogram     `msg:"szs"`
	ObjVersions   versionsHistogram `msg:"vh"`
	AllTierStats  *allTierStats     `msg:"ats,omitempty"` // Per-remote-tier stats; nil when no tiering data was recorded.
	Compacted     bool              `msg:"c"`
}
					
						
							|  |  |  | 
 | 
					
						
// allTierStats is a collection of per-tier stats across all configured remote
// tiers, keyed by tier name.
type allTierStats struct {
	Tiers map[string]tierStats `msg:"ts"`
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func newAllTierStats() *allTierStats { | 
					
						
							|  |  |  | 	return &allTierStats{ | 
					
						
							|  |  |  | 		Tiers: make(map[string]tierStats), | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | func (ats *allTierStats) addSizes(tiers map[string]tierStats) { | 
					
						
							|  |  |  | 	for tier, st := range tiers { | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 		ats.Tiers[tier] = ats.Tiers[tier].add(st) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (ats *allTierStats) merge(other *allTierStats) { | 
					
						
							|  |  |  | 	for tier, st := range other.Tiers { | 
					
						
							|  |  |  | 		ats.Tiers[tier] = ats.Tiers[tier].add(st) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-06-13 00:17:11 +08:00
										 |  |  | func (ats *allTierStats) clone() *allTierStats { | 
					
						
							|  |  |  | 	if ats == nil { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	dst := *ats | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 	dst.Tiers = make(map[string]tierStats, len(ats.Tiers)) | 
					
						
							|  |  |  | 	for tier, st := range ats.Tiers { | 
					
						
							|  |  |  | 		dst.Tiers[tier] = st | 
					
						
							| 
									
										
										
										
											2023-06-13 00:17:11 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return &dst | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | func (ats *allTierStats) populateStats(stats map[string]madmin.TierStats) { | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	if ats == nil { | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 		return | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Update stats for tiers as they become available.
 | 
					
						
							|  |  |  | 	for tier, st := range ats.Tiers { | 
					
						
							|  |  |  | 		stats[tier] = madmin.TierStats{ | 
					
						
							|  |  |  | 			TotalSize:   st.TotalSize, | 
					
						
							|  |  |  | 			NumVersions: st.NumVersions, | 
					
						
							|  |  |  | 			NumObjects:  st.NumObjects, | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 	return | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// tierStats holds per-tier stats of a remote tier.
type tierStats struct {
	TotalSize   uint64 `msg:"ts"` // Cumulative size in bytes.
	NumVersions int    `msg:"nv"` // Number of object versions.
	NumObjects  int    `msg:"no"` // Number of objects.
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (ts tierStats) add(u tierStats) tierStats { | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 	return tierStats{ | 
					
						
							|  |  |  | 		TotalSize:   ts.TotalSize + u.TotalSize, | 
					
						
							|  |  |  | 		NumVersions: ts.NumVersions + u.NumVersions, | 
					
						
							|  |  |  | 		NumObjects:  ts.NumObjects + u.NumObjects, | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
// Legacy entry versions are decode-only: msgp generates no encoders or
// marshalers for them.
//msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6 dataUsageEntryV7

//msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6 dataUsageEntryV7

// dataUsageEntryV2 is the version 2 on-disk layout of a data usage entry,
// kept only to decode old caches. Tuple encoding: field order is the wire format.
//msgp:tuple dataUsageEntryV2
type dataUsageEntryV2 struct {
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
	Children dataUsageHashMap
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
// dataUsageEntryV3 is the version 3 on-disk layout of a data usage entry,
// kept only to decode old caches. Tuple encoding: field order is the wire format.
//msgp:tuple dataUsageEntryV3
type dataUsageEntryV3 struct {
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
	Children dataUsageHashMap
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// dataUsageEntryV4 is the version 4 on-disk layout of a data usage entry,
// kept only to decode old caches. Tuple encoding: field order is the wire format.
//msgp:tuple dataUsageEntryV4
type dataUsageEntryV4 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
// dataUsageEntryV5 is the version 5 on-disk layout of a data usage entry,
// kept only to decode old caches. Tuple encoding: field order is the wire format.
//msgp:tuple dataUsageEntryV5
type dataUsageEntryV5 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size      int64
	Objects   uint64
	Versions  uint64 // Versions that are not delete markers.
	ObjSizes  sizeHistogram
	Compacted bool
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageEntryV6 is the version 6 on-disk layout of a data usage entry,
// kept only to decode old caches. Tuple encoding: field order is the wire format.
//msgp:tuple dataUsageEntryV6
type dataUsageEntryV6 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size      int64
	Objects   uint64
	Versions  uint64 // Versions that are not delete markers.
	ObjSizes  sizeHistogram
	Compacted bool
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
// dataUsageEntryV7 is the version 7 on-disk layout of a data usage entry,
// kept only to decode old caches. It differs from the current dataUsageEntry
// in using the coarser sizeHistogramV1 for ObjSizes.
type dataUsageEntryV7 struct {
	Children dataUsageHashMap `msg:"ch"`
	// These fields do not include any children.
	Size          int64             `msg:"sz"`
	Objects       uint64            `msg:"os"`
	Versions      uint64            `msg:"vs"` // Versions that are not delete markers.
	DeleteMarkers uint64            `msg:"dms"`
	ObjSizes      sizeHistogramV1   `msg:"szs"`
	ObjVersions   versionsHistogram `msg:"vh"`
	AllTierStats  *allTierStats     `msg:"ats,omitempty"`
	Compacted     bool              `msg:"c"`
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// dataUsageCache contains a cache of data usage entries latest version.
// Cache is keyed by the path hash key of each entry (see dataUsageHash.Key).
type dataUsageCache struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntry
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
// Legacy cache versions are decode-only: msgp generates no encoders or
// marshalers for them.
//msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6 dataUsageCacheV7

//msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6 dataUsageCacheV7

// dataUsageCacheV2 contains a cache of data usage entries version 2.
type dataUsageCacheV2 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV2
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV3 contains a cache of data usage entries version 3.
type dataUsageCacheV3 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV3
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV4 contains a cache of data usage entries version 4.
type dataUsageCacheV4 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV4
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV5 contains a cache of data usage entries version 5.
type dataUsageCacheV5 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV5
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV6 contains a cache of data usage entries version 6.
type dataUsageCacheV6 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV6
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
// dataUsageCacheV7 contains a cache of data usage entries version 7.
type dataUsageCacheV7 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV7
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// dataUsageEntryInfo pairs an entry with its name and parent path.
// In-memory only: excluded from msgp code generation.
//msgp:ignore dataUsageEntryInfo
type dataUsageEntryInfo struct {
	Name   string
	Parent string
	Entry  dataUsageEntry
}
					
						
							|  |  |  | 
 | 
					
						
// dataUsageCacheInfo carries metadata for a dataUsageCache: which bucket it
// covers, scan-cycle bookkeeping, and runtime-only hooks (lifecycle config,
// updates channel, replication config) that are never serialized (msg:"-").
type dataUsageCacheInfo struct {
	// Name of the bucket. Also root element.
	Name       string
	NextCycle  uint32
	LastUpdate time.Time
	// indicates if the disk is being healed and scanner
	// should skip healing the disk
	SkipHealing bool

	// Active lifecycle, if any on the bucket
	lifeCycle *lifecycle.Lifecycle `msg:"-"`

	// optional updates channel.
	// If set updates will be sent regularly to this channel.
	// Will not be closed when returned.
	updates     chan<- dataUsageEntry `msg:"-"`
	replication replicationConfig     `msg:"-"`
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | func (e *dataUsageEntry) addSizes(summary sizeSummary) { | 
					
						
							|  |  |  | 	e.Size += summary.totalSize | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	e.Versions += summary.versions | 
					
						
							| 
									
										
										
										
											2023-07-19 01:49:40 +08:00
										 |  |  | 	e.DeleteMarkers += summary.deleteMarkers | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	e.ObjSizes.add(summary.totalSize) | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 	e.ObjVersions.add(summary.versions) | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 	if len(summary.tiers) != 0 { | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 		if e.AllTierStats == nil { | 
					
						
							|  |  |  | 			e.AllTierStats = newAllTierStats() | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 		e.AllTierStats.addSizes(summary.tiers) | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // merge other data usage entry into this, excluding children.
 | 
					
						
							|  |  |  | func (e *dataUsageEntry) merge(other dataUsageEntry) { | 
					
						
							|  |  |  | 	e.Objects += other.Objects | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	e.Versions += other.Versions | 
					
						
							| 
									
										
										
										
											2023-07-19 01:49:40 +08:00
										 |  |  | 	e.DeleteMarkers += other.DeleteMarkers | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	e.Size += other.Size | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	for i, v := range other.ObjSizes[:] { | 
					
						
							|  |  |  | 		e.ObjSizes[i] += v | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 	for i, v := range other.ObjVersions[:] { | 
					
						
							|  |  |  | 		e.ObjVersions[i] += v | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-10-31 00:59:51 +08:00
										 |  |  | 	if other.AllTierStats != nil && len(other.AllTierStats.Tiers) != 0 { | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 		if e.AllTierStats == nil { | 
					
						
							|  |  |  | 			e.AllTierStats = newAllTierStats() | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		e.AllTierStats.merge(other.AllTierStats) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // mod returns true if the hash mod cycles == cycle.
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | // If cycles is 0 false is always returned.
 | 
					
						
							|  |  |  | // If cycles is 1 true is always returned (as expected).
 | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 	if cycles <= 1 { | 
					
						
							|  |  |  | 		return cycles == 1 | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-03-08 01:25:53 +08:00
										 |  |  | // modAlt returns true if the hash mod cycles == cycle.
 | 
					
						
							|  |  |  | // This is out of sync with mod.
 | 
					
						
							|  |  |  | // If cycles is 0 false is always returned.
 | 
					
						
							|  |  |  | // If cycles is 1 true is always returned (as expected).
 | 
					
						
							|  |  |  | func (h dataUsageHash) modAlt(cycle uint32, cycles uint32) bool { | 
					
						
							|  |  |  | 	if cycles <= 1 { | 
					
						
							|  |  |  | 		return cycles == 1 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return uint32(xxhash.Sum64String(string(h))>>32)%(cycles) == cycle%cycles | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // addChild will add a child based on its hash.
 | 
					
						
							|  |  |  | // If it already exists it will not be added again.
 | 
					
						
							|  |  |  | func (e *dataUsageEntry) addChild(hash dataUsageHash) { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	if _, ok := e.Children[hash.Key()]; ok { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if e.Children == nil { | 
					
						
							|  |  |  | 		e.Children = make(dataUsageHashMap, 1) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	e.Children[hash.Key()] = struct{}{} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-08-24 22:11:38 +08:00
										 |  |  | // Create a clone of the entry.
 | 
					
						
							|  |  |  | func (e dataUsageEntry) clone() dataUsageEntry { | 
					
						
							|  |  |  | 	// We operate on a copy from the receiver.
 | 
					
						
							|  |  |  | 	if e.Children != nil { | 
					
						
							|  |  |  | 		ch := make(dataUsageHashMap, len(e.Children)) | 
					
						
							|  |  |  | 		for k, v := range e.Children { | 
					
						
							|  |  |  | 			ch[k] = v | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		e.Children = ch | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2024-10-05 06:23:33 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	if e.AllTierStats != nil { | 
					
						
							| 
									
										
										
										
											2023-06-13 00:17:11 +08:00
										 |  |  | 		e.AllTierStats = e.AllTierStats.clone() | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-08-24 22:11:38 +08:00
										 |  |  | 	return e | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // find a path in the cache.
 | 
					
						
							|  |  |  | // Returns nil if not found.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) find(path string) *dataUsageEntry { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	due, ok := d.Cache[hashPath(path).Key()] | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if !ok { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return &due | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | // isCompacted returns whether an entry is compacted.
 | 
					
						
							|  |  |  | // Returns false if not found.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) isCompacted(h dataUsageHash) bool { | 
					
						
							|  |  |  | 	due, ok := d.Cache[h.Key()] | 
					
						
							|  |  |  | 	if !ok { | 
					
						
							|  |  |  | 		return false | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return due.Compacted | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | // findChildrenCopy returns a copy of the children of the supplied hash.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap { | 
					
						
							|  |  |  | 	ch := d.Cache[h.String()].Children | 
					
						
							|  |  |  | 	res := make(dataUsageHashMap, len(ch)) | 
					
						
							|  |  |  | 	for k := range ch { | 
					
						
							|  |  |  | 		res[k] = struct{}{} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return res | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-20 05:38:30 +08:00
										 |  |  | // searchParent will search for the parent of h.
 | 
					
						
							|  |  |  | // This is an O(N*N) operation if there is no parent or it cannot be guessed.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash { | 
					
						
							|  |  |  | 	want := h.Key() | 
					
						
							|  |  |  | 	if idx := strings.LastIndexByte(want, '/'); idx >= 0 { | 
					
						
							|  |  |  | 		if v := d.find(want[:idx]); v != nil { | 
					
						
							| 
									
										
										
										
											2024-03-27 23:12:14 +08:00
										 |  |  | 			_, ok := v.Children[want] | 
					
						
							|  |  |  | 			if ok { | 
					
						
							|  |  |  | 				found := hashPath(want[:idx]) | 
					
						
							|  |  |  | 				return &found | 
					
						
							| 
									
										
										
										
											2021-05-20 05:38:30 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	for k, v := range d.Cache { | 
					
						
							| 
									
										
										
										
											2024-03-27 23:12:14 +08:00
										 |  |  | 		_, ok := v.Children[want] | 
					
						
							|  |  |  | 		if ok { | 
					
						
							|  |  |  | 			found := dataUsageHash(k) | 
					
						
							|  |  |  | 			return &found | 
					
						
							| 
									
										
										
										
											2021-05-20 05:38:30 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | // deleteRecursive will delete an entry recursively, but not change its parent.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | func (d *dataUsageCache) deleteRecursive(h dataUsageHash) { | 
					
						
							|  |  |  | 	if existing, ok := d.Cache[h.String()]; ok { | 
					
						
							|  |  |  | 		// Delete first if there should be a loop.
 | 
					
						
							|  |  |  | 		delete(d.Cache, h.Key()) | 
					
						
							|  |  |  | 		for child := range existing.Children { | 
					
						
							|  |  |  | 			d.deleteRecursive(dataUsageHash(child)) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | // dui converts the flattened version of the path to madmin.DataUsageInfo.
 | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | // As a side effect d will be flattened, use a clone if this is not ok.
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	e := d.find(path) | 
					
						
							|  |  |  | 	if e == nil { | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | 		// No entry found, return empty.
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 		return DataUsageInfo{} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	flat := d.flatten(*e) | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 	dui := DataUsageInfo{ | 
					
						
							| 
									
										
										
										
											2023-07-19 01:49:40 +08:00
										 |  |  | 		LastUpdate:              d.Info.LastUpdate, | 
					
						
							|  |  |  | 		ObjectsTotalCount:       flat.Objects, | 
					
						
							|  |  |  | 		VersionsTotalCount:      flat.Versions, | 
					
						
							|  |  |  | 		DeleteMarkersTotalCount: flat.DeleteMarkers, | 
					
						
							|  |  |  | 		ObjectsTotalSize:        uint64(flat.Size), | 
					
						
							|  |  |  | 		BucketsCount:            uint64(len(e.Children)), | 
					
						
							|  |  |  | 		BucketsUsage:            d.bucketsUsageInfo(buckets), | 
					
						
							|  |  |  | 		TierStats:               d.tiersUsageInfo(buckets), | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return dui | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // replace will add or replace an entry in the cache.
 | 
					
						
							|  |  |  | // If a parent is specified it will be added to that if not already there.
 | 
					
						
							|  |  |  | // If the parent does not exist, it will be added.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) { | 
					
						
							|  |  |  | 	hash := hashPath(path) | 
					
						
							|  |  |  | 	if d.Cache == nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache = make(map[string]dataUsageEntry, 100) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	d.Cache[hash.Key()] = e | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if parent != "" { | 
					
						
							|  |  |  | 		phash := hashPath(parent) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		p := d.Cache[phash.Key()] | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		p.addChild(hash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache[phash.Key()] = p | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // replaceHashed add or replaces an entry to the cache based on its hash.
 | 
					
						
							|  |  |  | // If a parent is specified it will be added to that if not already there.
 | 
					
						
							|  |  |  | // If the parent does not exist, it will be added.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) replaceHashed(hash dataUsageHash, parent *dataUsageHash, e dataUsageEntry) { | 
					
						
							|  |  |  | 	if d.Cache == nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache = make(map[string]dataUsageEntry, 100) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	d.Cache[hash.Key()] = e | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if parent != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		p := d.Cache[parent.Key()] | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		p.addChild(hash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache[parent.Key()] = p | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | // copyWithChildren will copy entry with hash from src if it exists along with any children.
 | 
					
						
							|  |  |  | // If a parent is specified it will be added to that if not already there.
 | 
					
						
							|  |  |  | // If the parent does not exist, it will be added.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHash, parent *dataUsageHash) { | 
					
						
							|  |  |  | 	if d.Cache == nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache = make(map[string]dataUsageEntry, 100) | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	e, ok := src.Cache[hash.String()] | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	if !ok { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	d.Cache[hash.Key()] = e | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	for ch := range e.Children { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		if ch == hash.Key() { | 
					
						
							| 
									
										
										
										
											2024-04-04 20:04:40 +08:00
										 |  |  | 			scannerLogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 			return | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.copyWithChildren(src, dataUsageHash(ch), &hash) | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	if parent != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		p := d.Cache[parent.Key()] | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 		p.addChild(hash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache[parent.Key()] = p | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// reduceChildrenOf will reduce the recursive number of children to the limit
// by compacting the children with the least number of objects.
// When compactSelf is true the entry itself may be compacted into a single
// flattened entry if its direct children alone exceed the limit.
func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compactSelf bool) {
	e, ok := d.Cache[path.Key()]
	if !ok {
		return
	}
	if e.Compacted {
		// Already compacted; nothing left to reduce.
		return
	}
	// If direct children have more, compact all.
	if len(e.Children) > limit && compactSelf {
		flat := d.sizeRecursive(path.Key())
		flat.Compacted = true
		d.deleteRecursive(path)
		d.replaceHashed(path, nil, *flat)
		return
	}
	total := d.totalChildrenRec(path.Key())
	if total < limit {
		return
	}

	// Appears to be printed with _MINIO_SERVER_DEBUG=off
	// console.Debugf(" %d children found, compacting %v\n", total, path)

	// Pre-size to the recursive total, then truncate: the slice is filled
	// by the recursive add closure below.
	leaves := make([]struct {
		objects uint64
		path    dataUsageHash
	}, total)
	// Collect current leaves that have children.
	leaves = leaves[:0]
	remove := total - limit
	// add collects every entry under path (including path itself) that has
	// children, recording its recursive object count for sorting.
	var add func(path dataUsageHash)
	add = func(path dataUsageHash) {
		e, ok := d.Cache[path.Key()]
		if !ok {
			return
		}
		if len(e.Children) == 0 {
			return
		}
		sz := d.sizeRecursive(path.Key())
		leaves = append(leaves, struct {
			objects uint64
			path    dataUsageHash
		}{objects: sz.Objects, path: path})
		for ch := range e.Children {
			add(dataUsageHash(ch))
		}
	}

	// Add path recursively.
	add(path)
	// Smallest (fewest objects) first, so the cheapest subtrees are
	// compacted away before larger ones.
	sort.Slice(leaves, func(i, j int) bool {
		return leaves[i].objects < leaves[j].objects
	})
	for remove > 0 && len(leaves) > 0 {
		// Remove top entry.
		e := leaves[0]
		candidate := e.path
		if candidate == path && !compactSelf {
			// We should be the biggest,
			// if we cannot compact ourself, we are done.
			break
		}
		removing := d.totalChildrenRec(candidate.Key())
		flat := d.sizeRecursive(candidate.Key())
		if flat == nil {
			// Entry vanished (already compacted via an ancestor); skip it.
			leaves = leaves[1:]
			continue
		}
		// Appears to be printed with _MINIO_SERVER_DEBUG=off
		// console.Debugf("compacting %v, removing %d children\n", candidate, removing)

		// Replace the whole subtree with one flattened, compacted entry.
		flat.Compacted = true
		d.deleteRecursive(candidate)
		d.replaceHashed(candidate, nil, *flat)

		// Remove top entry and subtract removed children.
		remove -= removing
		leaves = leaves[1:]
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-05-10 22:49:50 +08:00
// forceCompact will force compact the cache of the top entry.
// If the number of children is more than limit*100, it will compact self.
// When above the limit a cleanup will also be performed to remove any possible abandoned entries.
func (d *dataUsageCache) forceCompact(limit int) {
	if d == nil || len(d.Cache) <= limit {
		// Nothing to do while the cache fits within the limit.
		return
	}
	top := hashPath(d.Info.Name).Key()
	topE := d.find(top)
	if topE == nil {
		scannerLogIf(GlobalContext, errors.New("forceCompact: root not found"))
		return
	}
	// If off by 2 orders of magnitude, compact self and log error.
	if len(topE.Children) > dataScannerForceCompactAtFolders {
		// If we still have too many children, compact self.
		scannerLogOnceIf(GlobalContext, fmt.Errorf("forceCompact: %q has %d children. Force compacting. Expect reduced scanner performance", d.Info.Name, len(topE.Children)), d.Info.Name)
		d.reduceChildrenOf(hashPath(d.Info.Name), limit, true)
	}
	if len(d.Cache) <= limit {
		return
	}

	// Check for abandoned entries.
	found := make(map[string]struct{}, len(d.Cache))

	// Mark all children recursively
	var mark func(entry dataUsageEntry)
	mark = func(entry dataUsageEntry) {
		for k := range entry.Children {
			found[k] = struct{}{}
			if ch, ok := d.Cache[k]; ok {
				mark(ch)
			}
		}
	}
	found[top] = struct{}{}
	mark(*topE)

	// Delete all entries not found.
	// Sweep: anything not reachable from the root is abandoned and removed.
	for k := range d.Cache {
		if _, ok := found[k]; !ok {
			delete(d.Cache, k)
		}
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // StringAll returns a detailed string representation of all entries in the cache.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) StringAll() string { | 
					
						
							| 
									
										
										
										
											2021-08-05 00:14:14 +08:00
										 |  |  | 	// Remove bloom filter from print.
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	s := fmt.Sprintf("info:%+v\n", d.Info) | 
					
						
							|  |  |  | 	for k, v := range d.Cache { | 
					
						
							|  |  |  | 		s += fmt.Sprintf("\t%v: %+v\n", k, v) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return strings.TrimSpace(s) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
// String returns a human readable representation of the hash,
// which is the raw hash value itself.
func (h dataUsageHash) String() string {
	return string(h)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// Key returns the key used for map lookups in the cache.
// Currently identical to String().
func (h dataUsageHash) Key() string {
	return string(h)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-07-14 01:42:11 +08:00
										 |  |  | func (d *dataUsageCache) flattenChildrens(root dataUsageEntry) (m map[string]dataUsageEntry) { | 
					
						
							|  |  |  | 	m = make(map[string]dataUsageEntry) | 
					
						
							|  |  |  | 	for id := range root.Children { | 
					
						
							|  |  |  | 		e := d.Cache[id] | 
					
						
							|  |  |  | 		if len(e.Children) > 0 { | 
					
						
							|  |  |  | 			e = d.flatten(e) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		m[id] = e | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return m | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // flatten all children of the root into the root element and return it.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) flatten(root dataUsageEntry) dataUsageEntry { | 
					
						
							|  |  |  | 	for id := range root.Children { | 
					
						
							|  |  |  | 		e := d.Cache[id] | 
					
						
							|  |  |  | 		if len(e.Children) > 0 { | 
					
						
							|  |  |  | 			e = d.flatten(e) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		root.merge(e) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	root.Children = nil | 
					
						
							|  |  |  | 	return root | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // add a size to the histogram.
 | 
					
						
							|  |  |  | func (h *sizeHistogram) add(size int64) { | 
					
						
							|  |  |  | 	// Fetch the histogram interval corresponding
 | 
					
						
							|  |  |  | 	// to the passed object size.
 | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 	for i, interval := range ObjectsHistogramIntervals[:] { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		if size >= interval.start && size <= interval.end { | 
					
						
							|  |  |  | 			h[i]++ | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
// mergeV1 is used to migrate data usage cache from sizeHistogramV1 to
// sizeHistogram
//
// NOTE(review): this walks both interval tables in lockstep and assumes
// every v1 interval eventually matches an interval in the new table
// (otherwise nidx could run past the new table) — confirm against
// ObjectsHistogramIntervalsV1/ObjectsHistogramIntervals definitions.
func (h *sizeHistogram) mergeV1(v sizeHistogramV1) {
	var oidx, nidx int
	for oidx < len(v) {
		intOld, intNew := ObjectsHistogramIntervalsV1[oidx], ObjectsHistogramIntervals[nidx]
		// skip intervals that aren't common to both histograms
		if intOld.start != intNew.start || intOld.end != intNew.end {
			nidx++
			continue
		}
		h[nidx] += v[oidx]
		oidx++
		nidx++
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
// toMap returns the histogram as a map of interval name to count.
// The special [1KiB, 1MiB) interval reports the accumulated counts of
// all sub-intervals that fall inside it.
func (h *sizeHistogram) toMap() map[string]uint64 {
	res := make(map[string]uint64, dataUsageBucketLen)
	// splCount accumulates counts of intervals contained in [1KiB, 1MiB).
	var splCount uint64
	for i, count := range h {
		szInt := ObjectsHistogramIntervals[i]
		switch {
		case humanize.KiByte == szInt.start && szInt.end == humanize.MiByte-1:
			// spl interval: [1024B, 1MiB)
			res[szInt.name] = splCount
		case humanize.KiByte <= szInt.start && szInt.end <= humanize.MiByte-1:
			// intervals that fall within the spl interval above; they
			// appear earlier in this array of intervals, see
			// ObjectsHistogramIntervals
			splCount += count
			fallthrough
		default:
			res[szInt.name] = count
		}
	}
	return res
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | // add a version count to the histogram.
 | 
					
						
							|  |  |  | func (h *versionsHistogram) add(versions uint64) { | 
					
						
							|  |  |  | 	// Fetch the histogram interval corresponding
 | 
					
						
							|  |  |  | 	// to the passed object size.
 | 
					
						
							|  |  |  | 	for i, interval := range ObjectsVersionCountIntervals[:] { | 
					
						
							|  |  |  | 		if versions >= uint64(interval.start) && versions <= uint64(interval.end) { | 
					
						
							|  |  |  | 			h[i]++ | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // toMap returns the map to a map[string]uint64.
 | 
					
						
							|  |  |  | func (h *versionsHistogram) toMap() map[string]uint64 { | 
					
						
							|  |  |  | 	res := make(map[string]uint64, dataUsageVersionLen) | 
					
						
							|  |  |  | 	for i, count := range h { | 
					
						
							|  |  |  | 		res[ObjectsVersionCountIntervals[i].name] = count | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return res | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats { | 
					
						
							|  |  |  | 	dst := newAllTierStats() | 
					
						
							|  |  |  | 	for _, bucket := range buckets { | 
					
						
							|  |  |  | 		e := d.find(bucket.Name) | 
					
						
							|  |  |  | 		if e == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		flat := d.flatten(*e) | 
					
						
							|  |  |  | 		if flat.AllTierStats == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		dst.merge(flat.AllTierStats) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if len(dst.Tiers) == 0 { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return dst | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | // bucketsUsageInfo returns the buckets usage info as a map, with
 | 
					
						
							|  |  |  | // key as bucket name
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo { | 
					
						
							| 
									
										
										
										
											2022-01-03 01:15:06 +08:00
										 |  |  | 	dst := make(map[string]BucketUsageInfo, len(buckets)) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	for _, bucket := range buckets { | 
					
						
							|  |  |  | 		e := d.find(bucket.Name) | 
					
						
							|  |  |  | 		if e == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		flat := d.flatten(*e) | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 		bui := BucketUsageInfo{ | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 			Size:                    uint64(flat.Size), | 
					
						
							|  |  |  | 			VersionsCount:           flat.Versions, | 
					
						
							|  |  |  | 			ObjectsCount:            flat.Objects, | 
					
						
							| 
									
										
										
										
											2023-07-19 01:49:40 +08:00
										 |  |  | 			DeleteMarkersCount:      flat.DeleteMarkers, | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 			ObjectSizesHistogram:    flat.ObjSizes.toMap(), | 
					
						
							|  |  |  | 			ObjectVersionsHistogram: flat.ObjVersions.toMap(), | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 		dst[bucket.Name] = bui | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return dst | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // sizeRecursive returns the path as a flattened entry.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry { | 
					
						
							|  |  |  | 	root := d.find(path) | 
					
						
							|  |  |  | 	if root == nil || len(root.Children) == 0 { | 
					
						
							|  |  |  | 		return root | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	flat := d.flatten(*root) | 
					
						
							|  |  |  | 	return &flat | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | // totalChildrenRec returns the total number of children recorded.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) totalChildrenRec(path string) int { | 
					
						
							|  |  |  | 	root := d.find(path) | 
					
						
							|  |  |  | 	if root == nil || len(root.Children) == 0 { | 
					
						
							|  |  |  | 		return 0 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	n := len(root.Children) | 
					
						
							|  |  |  | 	for ch := range root.Children { | 
					
						
							|  |  |  | 		n += d.totalChildrenRec(ch) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return n | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// root returns the root of the cache.
// The root entry is keyed by the cache's own name (d.Info.Name);
// returns nil if it has not been recorded.
func (d *dataUsageCache) root() *dataUsageEntry {
	return d.find(d.Info.Name)
}
					
						
							|  |  |  | 
 | 
					
						
// rootHash returns the hash key of the cache root, i.e. the hashed
// form of d.Info.Name used to index the Cache map.
func (d *dataUsageCache) rootHash() dataUsageHash {
	return hashPath(d.Info.Name)
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // clone returns a copy of the cache with no references to the existing.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) clone() dataUsageCache { | 
					
						
							|  |  |  | 	clone := dataUsageCache{ | 
					
						
							|  |  |  | 		Info:  d.Info, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		Cache: make(map[string]dataUsageEntry, len(d.Cache)), | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	for k, v := range d.Cache { | 
					
						
							| 
									
										
										
										
											2021-08-24 22:11:38 +08:00
										 |  |  | 		clone.Cache[k] = v.clone() | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return clone | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // merge root of other into d.
 | 
					
						
							|  |  |  | // children of root will be flattened before being merged.
 | 
					
						
							|  |  |  | // Last update time will be set to the last updated.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) merge(other dataUsageCache) { | 
					
						
							|  |  |  | 	existingRoot := d.root() | 
					
						
							|  |  |  | 	otherRoot := other.root() | 
					
						
							|  |  |  | 	if existingRoot == nil && otherRoot == nil { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if otherRoot == nil { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if existingRoot == nil { | 
					
						
							|  |  |  | 		*d = other.clone() | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if other.Info.LastUpdate.After(d.Info.LastUpdate) { | 
					
						
							|  |  |  | 		d.Info.LastUpdate = other.Info.LastUpdate | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	existingRoot.merge(*otherRoot) | 
					
						
							|  |  |  | 	eHash := d.rootHash() | 
					
						
							|  |  |  | 	for key := range otherRoot.Children { | 
					
						
							|  |  |  | 		entry := other.Cache[key] | 
					
						
							|  |  |  | 		flat := other.flatten(entry) | 
					
						
							|  |  |  | 		existing := d.Cache[key] | 
					
						
							|  |  |  | 		// If not found, merging simply adds.
 | 
					
						
							|  |  |  | 		existing.merge(flat) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.replaceHashed(dataUsageHash(key), &eHash, existing) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-09-11 00:18:19 +08:00
// objectIO is the minimal object-store surface the data usage cache
// needs: reading cache objects (used by load) and writing them
// (used by save). NOTE(review): presumably satisfied by the full
// object layer — confirm against callers.
type objectIO interface {
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (reader *GetObjectReader, err error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// The loader is optimistic and has no locking, but tries 5 times before giving up.
// If the object is not found, a nil error with empty data usage cache is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
	// By default, empty data usage cache
	*d = dataUsageCache{}

	// load attempts a single read of the named cache object.
	// Returns (retry, err): retry=true signals a transient condition
	// (quorum/storage error, or a cache that failed to deserialize) that
	// is worth retrying; a non-nil err is a hard backend failure.
	load := func(name string, timeout time.Duration) (bool, error) {
		// Abandon if more than time.Minute, so we don't hold up scanner.
		// drive timeout by default is 2 minutes, we do not need to wait longer.
		ctx, cancel := context.WithTimeout(ctx, timeout)
		defer cancel()

		// Prefer the current location under minioMetaBucket/bucketMetaPrefix.
		r, err := store.GetObjectNInfo(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, name), nil, http.Header{}, ObjectOptions{NoLock: true})
		if err != nil {
			switch err.(type) {
			case ObjectNotFound, BucketNotFound:
				// Not in the current location: fall back to the older
				// dataUsageBucket location.
				r, err = store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
				if err != nil {
					switch err.(type) {
					case ObjectNotFound, BucketNotFound:
						// Missing everywhere: not an error, cache stays empty.
						return false, nil
					case InsufficientReadQuorum, StorageErr:
						// Transient read failure: retry.
						return true, nil
					}
					return false, err
				}
				// Deserialization failure (e.g. corrupt/partial write)
				// triggers a retry rather than a hard error.
				err = d.deserialize(r)
				r.Close()
				return err != nil, nil
			case InsufficientReadQuorum, StorageErr:
				// Transient read failure: retry.
				return true, nil
			}
			return false, err
		}
		// Same as above: a corrupt cache is retried, not fatal.
		err = d.deserialize(r)
		r.Close()
		return err != nil, nil
	}

	// Caches are read+written without locks,
	retries := 0
	for retries < 5 {
		retry, err := load(name, time.Minute)
		if err != nil {
			return toObjectErr(err, dataUsageBucket, name)
		}
		if !retry {
			break
		}
		// Primary copy was unreadable; try the backup written by save.
		retry, err = load(name+".bkp", 30*time.Second)
		if err == nil && !retry {
			// Only return when we have valid data from the backup
			break
		}
		// Randomized backoff before the next attempt.
		retries++
		time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
	}

	if retries == 5 {
		// All attempts exhausted: log once and proceed with an empty cache.
		scannerLogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
	}

	return nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-03-10 07:15:46 +08:00
// Maximum running concurrent saves on server.
// Used as a counting semaphore by dataUsageCache.save: acquire by
// sending, release by receiving.
var maxConcurrentScannerSaves = make(chan struct{}, 4)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
// Note that no locking is done when saving.
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
	// Acquire a slot on the save semaphore, or bail out if the
	// context is canceled while waiting.
	select {
	case <-ctx.Done():
		return ctx.Err()
	case maxConcurrentScannerSaves <- struct{}{}:
	}

	// Serialize into a pooled buffer; released (and the semaphore
	// slot freed) when this function returns.
	buf := bytebufferpool.Get()
	defer func() {
		<-maxConcurrentScannerSaves
		buf.Reset()
		bytebufferpool.Put(buf)
	}()

	if err := d.serializeTo(buf); err != nil {
		return err
	}

	// save writes the serialized bytes under the given name with a
	// per-write timeout so a slow drive does not hold up the scanner.
	save := func(name string, timeout time.Duration) error {
		// Abandon if more than a minute, so we don't hold up scanner.
		ctx, cancel := context.WithTimeout(ctx, timeout)
		defer cancel()

		return saveConfig(ctx, store, pathJoin(bucketMetaPrefix, name), buf.Bytes())
	}
	// The backup copy is written after the primary save below returns
	// (defer ordering); its error is deliberately ignored — the backup
	// is best-effort and consumed by load as a fallback.
	defer save(name+".bkp", 5*time.Second) // Keep a backup as well

	// drive timeout by default is 2 minutes, we do not need to wait longer.
	return save(name, time.Minute)
}
					
						
							|  |  |  | 
 | 
					
						
// dataUsageCacheVer indicates the cache version.
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
	dataUsageCacheVerCurrent = 8 // version written by serializeTo
	dataUsageCacheVerV7      = 7
	dataUsageCacheVerV6      = 6
	dataUsageCacheVerV5      = 5
	dataUsageCacheVerV4      = 4
	dataUsageCacheVerV3      = 3
	dataUsageCacheVerV2      = 2
	dataUsageCacheVerV1      = 1 // no longer decodable; deserialize rejects it
)
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // serialize the contents of the cache.
 | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | func (d *dataUsageCache) serializeTo(dst io.Writer) error { | 
					
						
							|  |  |  | 	// Add version and compress.
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	_, err := dst.Write([]byte{dataUsageCacheVerCurrent}) | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	enc, err := zstd.NewWriter(dst, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		zstd.WithEncoderLevel(zstd.SpeedFastest), | 
					
						
							|  |  |  | 		zstd.WithWindowSize(1<<20), | 
					
						
							|  |  |  | 		zstd.WithEncoderConcurrency(2)) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	mEnc := msgp.NewWriter(enc) | 
					
						
							|  |  |  | 	err = d.EncodeMsg(mEnc) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	err = mEnc.Flush() | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	err = enc.Close() | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 	return nil | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // deserialize the supplied byte slice into the cache.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | func (d *dataUsageCache) deserialize(r io.Reader) error { | 
					
						
							|  |  |  | 	var b [1]byte | 
					
						
							|  |  |  | 	n, _ := r.Read(b[:]) | 
					
						
							|  |  |  | 	if n != 1 { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		return io.ErrUnexpectedEOF | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-05-12 23:07:02 +08:00
										 |  |  | 	ver := int(b[0]) | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	switch ver { | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 	case dataUsageCacheVerV1: | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		return errors.New("cache version deprecated (will autoupdate)") | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 	case dataUsageCacheVerV2: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 		dold := &dataUsageCacheV2{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			d.Cache[k] = dataUsageEntry{ | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 				Size:      v.Size, | 
					
						
							|  |  |  | 				Objects:   v.Objects, | 
					
						
							|  |  |  | 				ObjSizes:  v.ObjSizes, | 
					
						
							|  |  |  | 				Children:  v.Children, | 
					
						
							|  |  |  | 				Compacted: len(v.Children) == 0 && k != d.Info.Name, | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV3: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 		dold := &dataUsageCacheV3{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			due := dataUsageEntry{ | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 				Size:     v.Size, | 
					
						
							|  |  |  | 				Objects:  v.Objects, | 
					
						
							|  |  |  | 				ObjSizes: v.ObjSizes, | 
					
						
							|  |  |  | 				Children: v.Children, | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 			due.Compacted = len(due.Children) == 0 && k != d.Info.Name | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV4: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 		dold := &dataUsageCacheV4{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			due := dataUsageEntry{ | 
					
						
							|  |  |  | 				Size:     v.Size, | 
					
						
							|  |  |  | 				Objects:  v.Objects, | 
					
						
							|  |  |  | 				ObjSizes: v.ObjSizes, | 
					
						
							|  |  |  | 				Children: v.Children, | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			due.Compacted = len(due.Children) == 0 && k != d.Info.Name | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV5: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							|  |  |  | 		dold := &dataUsageCacheV5{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			due := dataUsageEntry{ | 
					
						
							|  |  |  | 				Size:     v.Size, | 
					
						
							|  |  |  | 				Objects:  v.Objects, | 
					
						
							|  |  |  | 				ObjSizes: v.ObjSizes, | 
					
						
							|  |  |  | 				Children: v.Children, | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			due.Compacted = len(due.Children) == 0 && k != d.Info.Name | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-05-12 23:07:02 +08:00
										 |  |  | 		return nil | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	case dataUsageCacheVerV6: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							|  |  |  | 		dold := &dataUsageCacheV6{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			due := dataUsageEntry{ | 
					
						
							| 
									
										
										
										
											2024-10-05 06:23:33 +08:00
										 |  |  | 				Children:  v.Children, | 
					
						
							|  |  |  | 				Size:      v.Size, | 
					
						
							|  |  |  | 				Objects:   v.Objects, | 
					
						
							|  |  |  | 				Versions:  v.Versions, | 
					
						
							|  |  |  | 				ObjSizes:  v.ObjSizes, | 
					
						
							|  |  |  | 				Compacted: v.Compacted, | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
										 |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV7: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							|  |  |  | 		dold := &dataUsageCacheV7{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			var szHist sizeHistogram | 
					
						
							|  |  |  | 			szHist.mergeV1(v.ObjSizes) | 
					
						
							|  |  |  | 			d.Cache[k] = dataUsageEntry{ | 
					
						
							| 
									
										
										
										
											2024-10-05 06:23:33 +08:00
										 |  |  | 				Children:  v.Children, | 
					
						
							|  |  |  | 				Size:      v.Size, | 
					
						
							|  |  |  | 				Objects:   v.Objects, | 
					
						
							|  |  |  | 				Versions:  v.Versions, | 
					
						
							|  |  |  | 				ObjSizes:  szHist, | 
					
						
							|  |  |  | 				Compacted: v.Compacted, | 
					
						
							| 
									
										
										
										
											2024-01-13 15:51:08 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 		return nil | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	case dataUsageCacheVerCurrent: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 		return d.DecodeMsg(msgp.NewReader(dec)) | 
					
						
							| 
									
										
										
										
											2021-05-12 23:07:02 +08:00
										 |  |  | 	default: | 
					
						
							|  |  |  | 		return fmt.Errorf("dataUsageCache: unknown version: %d", ver) | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// Trim this from start+end of hashes.
// hashPathCutSet is the character set passed to strings.Trim in hashPath.
// It starts as dataUsageRoot; init() below appends the OS path separator
// when dataUsageRoot is not itself the separator.
var hashPathCutSet = dataUsageRoot
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func init() { | 
					
						
							|  |  |  | 	if dataUsageRoot != string(filepath.Separator) { | 
					
						
							|  |  |  | 		hashPathCutSet = dataUsageRoot + string(filepath.Separator) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // hashPath calculates a hash of the provided string.
 | 
					
						
							|  |  |  | func hashPath(data string) dataUsageHash { | 
					
						
							|  |  |  | 	if data != dataUsageRoot { | 
					
						
							|  |  |  | 		data = strings.Trim(data, hashPathCutSet) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	return dataUsageHash(path.Clean(data)) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
// dataUsageHashMap is a string set (empty-struct values carry no storage).
// Serialization is provided by the hand-written msgp methods below; the
// directive keeps the msgp code generator from emitting its own.
//msgp:ignore dataUsageHashMap
type dataUsageHashMap map[string]struct{}
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
// DecodeMsg implements msgp.Decodable
//
// The set is wire-encoded as a plain msgpack array of strings. An empty
// array decodes to a nil map, matching the encoder which writes the map's
// keys as array elements.
func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
	var zb0002 uint32
	// Element count comes from the array header.
	zb0002, err = dc.ReadArrayHeader()
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0002 == 0 {
		// Empty array: represent as nil rather than an allocated empty map.
		*z = nil
		return
	}
	// Pre-size the map to the announced element count.
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, err = dc.ReadString()
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	return
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
// EncodeMsg implements msgp.Encodable
//
// Writes the set as a msgpack array containing the map's keys. Iteration
// order is Go map order, i.e. not deterministic across runs.
func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
	err = en.WriteArrayHeader(uint32(len(z)))
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0004 := range z {
		err = en.WriteString(zb0004)
		if err != nil {
			err = msgp.WrapError(err, zb0004)
			return
		}
	}
	return
}
					
						
							|  |  |  | 
 | 
					
						
// MarshalMsg implements msgp.Marshaler
//
// Appends the array-of-strings encoding to b and returns the extended
// slice. Msgsize() below must remain an upper bound for what is appended
// here, since Require reserves capacity based on it.
func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for zb0004 := range z {
		o = msgp.AppendString(o, zb0004)
	}
	return
}
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
// UnmarshalMsg implements msgp.Unmarshaler
//
// Byte-slice counterpart of DecodeMsg: reads the array-of-strings encoding
// from bts and returns the remaining bytes in o. An empty array yields a
// nil map.
func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if zb0002 == 0 {
		// Empty array: nil map, and all remaining bytes are returned.
		*z = nil
		return bts, nil
	}
	// Pre-size the map to the announced element count.
	*z = make(dataUsageHashMap, zb0002)
	for i := uint32(0); i < zb0002; i++ {
		{
			var zb0003 string
			zb0003, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
			(*z)[zb0003] = struct{}{}
		}
	}
	o = bts
	return
}
					
						
							|  |  |  | 
 | 
					
						
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
//
// One array header plus, per key, a string prefix and the key bytes —
// mirroring exactly what MarshalMsg/EncodeMsg write.
func (z dataUsageHashMap) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0004 := range z {
		s += msgp.StringPrefixSize + len(zb0004)
	}
	return
}
					
						
							| 
									
										
										
										
											2022-07-06 05:45:49 +08:00
										 |  |  | 
 | 
					
						
//msgp:encode ignore currentScannerCycle
//msgp:decode ignore currentScannerCycle

// currentScannerCycle tracks the scanner's cycle bookkeeping. The msgp
// directives above suppress Encode/Decode generation for this type (all
// fields are unexported).
type currentScannerCycle struct {
	current        uint64      // presumably the cycle number in progress — confirm against users
	next           uint64      // presumably the upcoming cycle number — confirm against users
	started        time.Time   // NOTE(review): looks like the start time of the current cycle
	cycleCompleted []time.Time // completion timestamps; cloned defensively by clone()
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // clone returns a clone.
 | 
					
						
							|  |  |  | func (z currentScannerCycle) clone() currentScannerCycle { | 
					
						
							|  |  |  | 	z.cycleCompleted = append(make([]time.Time, 0, len(z.cycleCompleted)), z.cycleCompleted...) | 
					
						
							|  |  |  | 	return z | 
					
						
							|  |  |  | } |