| 
									
										
										
										
											2021-04-19 03:41:13 +08:00
										 |  |  | // Copyright (c) 2015-2021 MinIO, Inc.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This file is part of MinIO Object Storage stack
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is free software: you can redistribute it and/or modify
 | 
					
						
							|  |  |  | // it under the terms of the GNU Affero General Public License as published by
 | 
					
						
							|  |  |  | // the Free Software Foundation, either version 3 of the License, or
 | 
					
						
							|  |  |  | // (at your option) any later version.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // This program is distributed in the hope that it will be useful
 | 
					
						
							|  |  |  | // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
					
						
							|  |  |  | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
					
						
							|  |  |  | // GNU Affero General Public License for more details.
 | 
					
						
							|  |  |  | //
 | 
					
						
							|  |  |  | // You should have received a copy of the GNU Affero General Public License
 | 
					
						
							|  |  |  | // along with this program.  If not, see <http://www.gnu.org/licenses/>.
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | package cmd | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | import ( | 
					
						
							| 
									
										
										
										
											2022-12-06 05:01:11 +08:00
										 |  |  | 	"bytes" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"context" | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	"errors" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"fmt" | 
					
						
							|  |  |  | 	"io" | 
					
						
							| 
									
										
										
										
											2023-03-10 07:15:46 +08:00
										 |  |  | 	"math/rand" | 
					
						
							| 
									
										
										
										
											2021-01-01 01:45:09 +08:00
										 |  |  | 	"net/http" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"path" | 
					
						
							|  |  |  | 	"path/filepath" | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	"sort" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"strings" | 
					
						
							|  |  |  | 	"time" | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	"github.com/cespare/xxhash/v2" | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	"github.com/klauspost/compress/zstd" | 
					
						
							| 
									
										
										
										
											2022-12-07 05:46:50 +08:00
										 |  |  | 	"github.com/minio/madmin-go/v2" | 
					
						
							| 
									
										
										
										
											2021-06-02 05:59:40 +08:00
										 |  |  | 	"github.com/minio/minio/internal/bucket/lifecycle" | 
					
						
							|  |  |  | 	"github.com/minio/minio/internal/hash" | 
					
						
							|  |  |  | 	"github.com/minio/minio/internal/logger" | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	"github.com/tinylib/msgp/msgp" | 
					
						
							|  |  |  | ) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | //go:generate msgp -file $GOFILE -unexported
 | 
					
						
							|  |  |  | 
 | 
					
						
// dataUsageHash is the hash type used.
// It is a string key identifying an entry in the data usage cache
// (see dataUsageHash.mod/modAlt for how it is bucketed into cycles).
type dataUsageHash string
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
// sizeHistogram is a size histogram.
// Fixed-size array of counters, one per size interval
// (dataUsageBucketLen buckets, defined elsewhere in the package).
type sizeHistogram [dataUsageBucketLen]uint64
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
// versionsHistogram is a histogram of number of versions in an object.
// Fixed-size array of counters, one per version-count interval
// (dataUsageVersionLen buckets, defined elsewhere in the package).
type versionsHistogram [dataUsageVersionLen]uint64
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// dataUsageEntry holds usage counters for a single scanned path together
// with the hashes of its direct children (current serialization version).
type dataUsageEntry struct {
	Children dataUsageHashMap `msg:"ch"`
	// These fields do not include any children.
	Size             int64                `msg:"sz"`
	Objects          uint64               `msg:"os"`
	Versions         uint64               `msg:"vs"` // Versions that are not delete markers.
	ObjSizes         sizeHistogram        `msg:"szs"`
	ObjVersions      versionsHistogram    `msg:"vh"`
	ReplicationStats *replicationAllStats `msg:"rs,omitempty"` // nil until replication data is added; see addSizes.
	AllTierStats     *allTierStats        `msg:"ats,omitempty"` // nil until tier data is added; see addSizes.
	Compacted        bool                 `msg:"c"`
}
					
						
							|  |  |  | 
 | 
					
						
// allTierStats is a collection of per-tier stats across all configured remote
// tiers.
type allTierStats struct {
	// Tiers maps tier name to its accumulated stats.
	Tiers map[string]tierStats `msg:"ts"`
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func newAllTierStats() *allTierStats { | 
					
						
							|  |  |  | 	return &allTierStats{ | 
					
						
							|  |  |  | 		Tiers: make(map[string]tierStats), | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (ats *allTierStats) addSizes(sz sizeSummary) { | 
					
						
							|  |  |  | 	for tier, st := range sz.tiers { | 
					
						
							|  |  |  | 		ats.Tiers[tier] = ats.Tiers[tier].add(st) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (ats *allTierStats) merge(other *allTierStats) { | 
					
						
							|  |  |  | 	for tier, st := range other.Tiers { | 
					
						
							|  |  |  | 		ats.Tiers[tier] = ats.Tiers[tier].add(st) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (ats *allTierStats) adminStats(stats map[string]madmin.TierStats) map[string]madmin.TierStats { | 
					
						
							|  |  |  | 	if ats == nil { | 
					
						
							|  |  |  | 		return stats | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 	// Update stats for tiers as they become available.
 | 
					
						
							|  |  |  | 	for tier, st := range ats.Tiers { | 
					
						
							|  |  |  | 		stats[tier] = madmin.TierStats{ | 
					
						
							|  |  |  | 			TotalSize:   st.TotalSize, | 
					
						
							|  |  |  | 			NumVersions: st.NumVersions, | 
					
						
							|  |  |  | 			NumObjects:  st.NumObjects, | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return stats | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// tierStats holds per-tier stats of a remote tier.
type tierStats struct {
	TotalSize   uint64 `msg:"ts"` // total bytes stored in the tier
	NumVersions int    `msg:"nv"` // number of object versions in the tier
	NumObjects  int    `msg:"no"` // number of objects in the tier
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (ts tierStats) add(u tierStats) tierStats { | 
					
						
							|  |  |  | 	ts.TotalSize += u.TotalSize | 
					
						
							|  |  |  | 	ts.NumVersions += u.NumVersions | 
					
						
							|  |  |  | 	ts.NumObjects += u.NumObjects | 
					
						
							|  |  |  | 	return ts | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-09-22 00:01:51 +08:00
//msgp:tuple replicationStatsV1

// replicationStatsV1 is the legacy (v1) per-target replication counter set,
// retained so older serialized caches can still be decoded.
type replicationStatsV1 struct {
	PendingSize          uint64
	ReplicatedSize       uint64
	FailedSize           uint64
	ReplicaSize          uint64
	FailedCount          uint64
	PendingCount         uint64
	MissedThresholdSize  uint64
	AfterThresholdSize   uint64
	MissedThresholdCount uint64
	AfterThresholdCount  uint64
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func (rsv1 replicationStatsV1) Empty() bool { | 
					
						
							|  |  |  | 	return rsv1.ReplicatedSize == 0 && | 
					
						
							|  |  |  | 		rsv1.FailedSize == 0 && | 
					
						
							|  |  |  | 		rsv1.FailedCount == 0 | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
//msgp:tuple replicationStats

// replicationStats is the current per-target replication counter set
// (unlike replicationStatsV1 it carries no ReplicaSize field; replica size
// is tracked at the replicationAllStats level).
type replicationStats struct {
	PendingSize          uint64
	ReplicatedSize       uint64
	FailedSize           uint64
	FailedCount          uint64
	PendingCount         uint64
	MissedThresholdSize  uint64
	AfterThresholdSize   uint64
	MissedThresholdCount uint64
	AfterThresholdCount  uint64
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | func (rs replicationStats) Empty() bool { | 
					
						
							|  |  |  | 	return rs.ReplicatedSize == 0 && | 
					
						
							|  |  |  | 		rs.FailedSize == 0 && | 
					
						
							|  |  |  | 		rs.FailedCount == 0 | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// replicationAllStats groups replication stats for all targets (keyed by
// target ARN) plus the accumulated replica size (current version).
type replicationAllStats struct {
	Targets     map[string]replicationStats `msg:"t,omitempty"`
	ReplicaSize uint64                      `msg:"r,omitempty"`
}
					
						
							|  |  |  | 
 | 
					
						
//msgp:tuple replicationAllStatsV1

// replicationAllStatsV1 is the legacy (v1) form of replicationAllStats,
// retained for decoding older serialized caches.
type replicationAllStatsV1 struct {
	Targets     map[string]replicationStats
	ReplicaSize uint64 `msg:"ReplicaSize,omitempty"`
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | //msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6
 | 
					
						
							|  |  |  | //msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
//msgp:tuple dataUsageEntryV2

// dataUsageEntryV2 is the version-2 on-disk layout of a usage entry,
// kept for migrating older caches.
type dataUsageEntryV2 struct {
	// These fields do not include any children.
	Size     int64
	Objects  uint64
	ObjSizes sizeHistogram
	Children dataUsageHashMap
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
//msgp:tuple dataUsageEntryV3

// dataUsageEntryV3 is the version-3 on-disk layout of a usage entry,
// kept for migrating older caches.
type dataUsageEntryV3 struct {
	// These fields do not include any children.
	Size                   int64
	ReplicatedSize         uint64
	ReplicationPendingSize uint64
	ReplicationFailedSize  uint64
	ReplicaSize            uint64
	Objects                uint64
	ObjSizes               sizeHistogram
	Children               dataUsageHashMap
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
//msgp:tuple dataUsageEntryV4

// dataUsageEntryV4 is the version-4 on-disk layout of a usage entry,
// kept for migrating older caches.
type dataUsageEntryV4 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	ObjSizes         sizeHistogram
	ReplicationStats replicationStatsV1
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
//msgp:tuple dataUsageEntryV5

// dataUsageEntryV5 is the version-5 on-disk layout of a usage entry,
// kept for migrating older caches.
type dataUsageEntryV5 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationStatsV1
	Compacted        bool
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
//msgp:tuple dataUsageEntryV6

// dataUsageEntryV6 is the version-6 on-disk layout of a usage entry,
// kept for migrating older caches.
type dataUsageEntryV6 struct {
	Children dataUsageHashMap
	// These fields do not include any children.
	Size             int64
	Objects          uint64
	Versions         uint64 // Versions that are not delete markers.
	ObjSizes         sizeHistogram
	ReplicationStats *replicationAllStatsV1
	Compacted        bool
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// dataUsageCache contains a cache of data usage entries latest version.
type dataUsageCache struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntry // entries keyed by hash; see dataUsageHash.
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | //msgp:encode ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
 | 
					
						
							|  |  |  | //msgp:marshal ignore dataUsageCacheV2 dataUsageCacheV3 dataUsageCacheV4 dataUsageCacheV5 dataUsageCacheV6
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
// dataUsageCacheV2 contains a cache of data usage entries version 2.
type dataUsageCacheV2 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV2
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV3 contains a cache of data usage entries version 3.
type dataUsageCacheV3 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV3
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV4 contains a cache of data usage entries version 4.
type dataUsageCacheV4 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV4
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV5 contains a cache of data usage entries version 5.
type dataUsageCacheV5 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV5
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
// dataUsageCacheV6 contains a cache of data usage entries version 6.
type dataUsageCacheV6 struct {
	Info  dataUsageCacheInfo
	Cache map[string]dataUsageEntryV6
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
//msgp:ignore dataUsageEntryInfo

// dataUsageEntryInfo pairs a usage entry with its own name and its parent's
// name. Not serialized (excluded from msgp generation above).
type dataUsageEntryInfo struct {
	Name   string
	Parent string
	Entry  dataUsageEntry
}
					
						
							|  |  |  | 
 | 
					
						
// dataUsageCacheInfo carries metadata about a dataUsageCache: identity,
// scan-cycle bookkeeping, and unexported runtime-only hooks (`msg:"-"`)
// that are never serialized.
type dataUsageCacheInfo struct {
	// Name of the bucket. Also root element.
	Name       string
	NextCycle  uint32
	LastUpdate time.Time
	// indicates if the disk is being healed and scanner
	// should skip healing the disk
	SkipHealing bool

	// Active lifecycle, if any on the bucket
	lifeCycle *lifecycle.Lifecycle `msg:"-"`

	// optional updates channel.
	// If set updates will be sent regularly to this channel.
	// Will not be closed when returned.
	updates     chan<- dataUsageEntry `msg:"-"`
	replication replicationConfig     `msg:"-"`
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-12-08 05:47:48 +08:00
// addSizes adds the usage recorded in summary to this entry: total size,
// version counts, both histograms, per-target replication counters and
// per-tier stats. Children are not touched.
func (e *dataUsageEntry) addSizes(summary sizeSummary) {
	e.Size += summary.totalSize
	e.Versions += summary.versions
	e.ObjSizes.add(summary.totalSize)
	e.ObjVersions.add(summary.versions)

	// Lazily create ReplicationStats and its Targets map; both may be nil
	// on entries decoded from older caches or freshly zeroed entries.
	if e.ReplicationStats == nil {
		e.ReplicationStats = &replicationAllStats{
			Targets: make(map[string]replicationStats),
		}
	} else if e.ReplicationStats.Targets == nil {
		e.ReplicationStats.Targets = make(map[string]replicationStats)
	}
	e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize)

	if summary.replTargetStats != nil {
		// Accumulate per-target (ARN-keyed) replication counters.
		for arn, st := range summary.replTargetStats {
			tgtStat, ok := e.ReplicationStats.Targets[arn]
			if !ok {
				tgtStat = replicationStats{}
			}
			tgtStat.PendingSize += uint64(st.pendingSize)
			tgtStat.FailedSize += uint64(st.failedSize)
			tgtStat.ReplicatedSize += uint64(st.replicatedSize)
			tgtStat.FailedCount += st.failedCount
			tgtStat.PendingCount += st.pendingCount
			e.ReplicationStats.Targets[arn] = tgtStat
		}
	}
	if summary.tiers != nil {
		// Lazily create AllTierStats only when tier data is present,
		// keeping the serialized entry small otherwise (omitempty).
		if e.AllTierStats == nil {
			e.AllTierStats = newAllTierStats()
		}
		e.AllTierStats.addSizes(summary)
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// merge other data usage entry into this, excluding children.
// Counters, histograms, replication and tier stats are summed field-wise;
// the receiver's lazily-allocated maps/pointers are created on demand.
func (e *dataUsageEntry) merge(other dataUsageEntry) {
	e.Objects += other.Objects
	e.Versions += other.Versions
	e.Size += other.Size
	if other.ReplicationStats != nil {
		// Ensure destination stats and target map exist before merging.
		if e.ReplicationStats == nil {
			e.ReplicationStats = &replicationAllStats{Targets: make(map[string]replicationStats)}
		} else if e.ReplicationStats.Targets == nil {
			e.ReplicationStats.Targets = make(map[string]replicationStats)
		}
		e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize
		for arn, stat := range other.ReplicationStats.Targets {
			st := e.ReplicationStats.Targets[arn]
			e.ReplicationStats.Targets[arn] = replicationStats{
				PendingSize:    stat.PendingSize + st.PendingSize,
				FailedSize:     stat.FailedSize + st.FailedSize,
				ReplicatedSize: stat.ReplicatedSize + st.ReplicatedSize,
				PendingCount:   stat.PendingCount + st.PendingCount,
				FailedCount:    stat.FailedCount + st.FailedCount,
			}
		}
	}

	for i, v := range other.ObjSizes[:] {
		e.ObjSizes[i] += v
	}

	for i, v := range other.ObjVersions[:] {
		e.ObjVersions[i] += v
	}

	if other.AllTierStats != nil {
		if e.AllTierStats == nil {
			e.AllTierStats = newAllTierStats()
		}
		e.AllTierStats.merge(other.AllTierStats)
	}
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // mod returns true if the hash mod cycles == cycle.
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | // If cycles is 0 false is always returned.
 | 
					
						
							|  |  |  | // If cycles is 1 true is always returned (as expected).
 | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | func (h dataUsageHash) mod(cycle uint32, cycles uint32) bool { | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | 	if cycles <= 1 { | 
					
						
							|  |  |  | 		return cycles == 1 | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	return uint32(xxhash.Sum64String(string(h)))%cycles == cycle%cycles | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2022-03-08 01:25:53 +08:00
										 |  |  | // modAlt returns true if the hash mod cycles == cycle.
 | 
					
						
							|  |  |  | // This is out of sync with mod.
 | 
					
						
							|  |  |  | // If cycles is 0 false is always returned.
 | 
					
						
							|  |  |  | // If cycles is 1 true is always returned (as expected).
 | 
					
						
							|  |  |  | func (h dataUsageHash) modAlt(cycle uint32, cycles uint32) bool { | 
					
						
							|  |  |  | 	if cycles <= 1 { | 
					
						
							|  |  |  | 		return cycles == 1 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return uint32(xxhash.Sum64String(string(h))>>32)%(cycles) == cycle%cycles | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // addChild will add a child based on its hash.
 | 
					
						
							|  |  |  | // If it already exists it will not be added again.
 | 
					
						
							|  |  |  | func (e *dataUsageEntry) addChild(hash dataUsageHash) { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	if _, ok := e.Children[hash.Key()]; ok { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if e.Children == nil { | 
					
						
							|  |  |  | 		e.Children = make(dataUsageHashMap, 1) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	e.Children[hash.Key()] = struct{}{} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-08-24 22:11:38 +08:00
										 |  |  | // Create a clone of the entry.
 | 
					
						
							|  |  |  | func (e dataUsageEntry) clone() dataUsageEntry { | 
					
						
							|  |  |  | 	// We operate on a copy from the receiver.
 | 
					
						
							|  |  |  | 	if e.Children != nil { | 
					
						
							|  |  |  | 		ch := make(dataUsageHashMap, len(e.Children)) | 
					
						
							|  |  |  | 		for k, v := range e.Children { | 
					
						
							|  |  |  | 			ch[k] = v | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		e.Children = ch | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if e.ReplicationStats != nil { | 
					
						
							|  |  |  | 		// Copy to new struct
 | 
					
						
							|  |  |  | 		r := *e.ReplicationStats | 
					
						
							|  |  |  | 		e.ReplicationStats = &r | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	if e.AllTierStats != nil { | 
					
						
							|  |  |  | 		ats := newAllTierStats() | 
					
						
							|  |  |  | 		ats.merge(e.AllTierStats) | 
					
						
							|  |  |  | 		e.AllTierStats = ats | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-08-24 22:11:38 +08:00
										 |  |  | 	return e | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // find a path in the cache.
 | 
					
						
							|  |  |  | // Returns nil if not found.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) find(path string) *dataUsageEntry { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	due, ok := d.Cache[hashPath(path).Key()] | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if !ok { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return &due | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | // isCompacted returns whether an entry is compacted.
 | 
					
						
							|  |  |  | // Returns false if not found.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) isCompacted(h dataUsageHash) bool { | 
					
						
							|  |  |  | 	due, ok := d.Cache[h.Key()] | 
					
						
							|  |  |  | 	if !ok { | 
					
						
							|  |  |  | 		return false | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return due.Compacted | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-08-25 04:47:01 +08:00
										 |  |  | // findChildrenCopy returns a copy of the children of the supplied hash.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) findChildrenCopy(h dataUsageHash) dataUsageHashMap { | 
					
						
							|  |  |  | 	ch := d.Cache[h.String()].Children | 
					
						
							|  |  |  | 	res := make(dataUsageHashMap, len(ch)) | 
					
						
							|  |  |  | 	for k := range ch { | 
					
						
							|  |  |  | 		res[k] = struct{}{} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return res | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-20 05:38:30 +08:00
										 |  |  | // searchParent will search for the parent of h.
 | 
					
						
							|  |  |  | // This is an O(N*N) operation if there is no parent or it cannot be guessed.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash { | 
					
						
							|  |  |  | 	want := h.Key() | 
					
						
							|  |  |  | 	if idx := strings.LastIndexByte(want, '/'); idx >= 0 { | 
					
						
							|  |  |  | 		if v := d.find(want[:idx]); v != nil { | 
					
						
							|  |  |  | 			for child := range v.Children { | 
					
						
							|  |  |  | 				if child == want { | 
					
						
							|  |  |  | 					found := hashPath(want[:idx]) | 
					
						
							|  |  |  | 					return &found | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	for k, v := range d.Cache { | 
					
						
							|  |  |  | 		for child := range v.Children { | 
					
						
							|  |  |  | 			if child == want { | 
					
						
							|  |  |  | 				found := dataUsageHash(k) | 
					
						
							|  |  |  | 				return &found | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return nil | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | // deleteRecursive will delete an entry recursively, but not change its parent.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | func (d *dataUsageCache) deleteRecursive(h dataUsageHash) { | 
					
						
							|  |  |  | 	if existing, ok := d.Cache[h.String()]; ok { | 
					
						
							|  |  |  | 		// Delete first if there should be a loop.
 | 
					
						
							|  |  |  | 		delete(d.Cache, h.Key()) | 
					
						
							|  |  |  | 		for child := range existing.Children { | 
					
						
							|  |  |  | 			d.deleteRecursive(dataUsageHash(child)) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | // dui converts the flattened version of the path to madmin.DataUsageInfo.
 | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | // As a side effect d will be flattened, use a clone if this is not ok.
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	e := d.find(path) | 
					
						
							|  |  |  | 	if e == nil { | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | 		// No entry found, return empty.
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 		return DataUsageInfo{} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	flat := d.flatten(*e) | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 	dui := DataUsageInfo{ | 
					
						
							| 
									
										
										
										
											2022-04-26 13:04:10 +08:00
										 |  |  | 		LastUpdate:         d.Info.LastUpdate, | 
					
						
							|  |  |  | 		ObjectsTotalCount:  flat.Objects, | 
					
						
							|  |  |  | 		VersionsTotalCount: flat.Versions, | 
					
						
							|  |  |  | 		ObjectsTotalSize:   uint64(flat.Size), | 
					
						
							|  |  |  | 		BucketsCount:       uint64(len(e.Children)), | 
					
						
							|  |  |  | 		BucketsUsage:       d.bucketsUsageInfo(buckets), | 
					
						
							|  |  |  | 		TierStats:          d.tiersUsageInfo(buckets), | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return dui | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // replace will add or replace an entry in the cache.
 | 
					
						
							|  |  |  | // If a parent is specified it will be added to that if not already there.
 | 
					
						
							|  |  |  | // If the parent does not exist, it will be added.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) replace(path, parent string, e dataUsageEntry) { | 
					
						
							|  |  |  | 	hash := hashPath(path) | 
					
						
							|  |  |  | 	if d.Cache == nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache = make(map[string]dataUsageEntry, 100) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	d.Cache[hash.Key()] = e | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if parent != "" { | 
					
						
							|  |  |  | 		phash := hashPath(parent) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		p := d.Cache[phash.Key()] | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		p.addChild(hash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache[phash.Key()] = p | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // replaceHashed add or replaces an entry to the cache based on its hash.
 | 
					
						
							|  |  |  | // If a parent is specified it will be added to that if not already there.
 | 
					
						
							|  |  |  | // If the parent does not exist, it will be added.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) replaceHashed(hash dataUsageHash, parent *dataUsageHash, e dataUsageEntry) { | 
					
						
							|  |  |  | 	if d.Cache == nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache = make(map[string]dataUsageEntry, 100) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	d.Cache[hash.Key()] = e | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if parent != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		p := d.Cache[parent.Key()] | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		p.addChild(hash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache[parent.Key()] = p | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | // copyWithChildren will copy entry with hash from src if it exists along with any children.
 | 
					
						
							|  |  |  | // If a parent is specified it will be added to that if not already there.
 | 
					
						
							|  |  |  | // If the parent does not exist, it will be added.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHash, parent *dataUsageHash) { | 
					
						
							|  |  |  | 	if d.Cache == nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache = make(map[string]dataUsageEntry, 100) | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	e, ok := src.Cache[hash.String()] | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	if !ok { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	d.Cache[hash.Key()] = e | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	for ch := range e.Children { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		if ch == hash.Key() { | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 			logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) | 
					
						
							|  |  |  | 			return | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.copyWithChildren(src, dataUsageHash(ch), &hash) | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	if parent != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		p := d.Cache[parent.Key()] | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 		p.addChild(hash) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.Cache[parent.Key()] = p | 
					
						
							| 
									
										
										
										
											2020-04-28 01:06:21 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// reduceChildrenOf will reduce the recursive number of children to the limit
// by compacting the children with the least number of objects.
func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compactSelf bool) {
	e, ok := d.Cache[path.Key()]
	if !ok {
		// Nothing cached for this path; nothing to reduce.
		return
	}
	if e.Compacted {
		// Already compacted; its subtree has been folded away.
		return
	}
	// If direct children have more, compact all.
	if len(e.Children) > limit && compactSelf {
		// Fold the whole subtree into a single compacted entry.
		flat := d.sizeRecursive(path.Key())
		flat.Compacted = true
		d.deleteRecursive(path)
		d.replaceHashed(path, nil, *flat)
		return
	}
	total := d.totalChildrenRec(path.Key())
	if total < limit {
		// Under the budget already.
		return
	}

	// Appears to be printed with _MINIO_SERVER_DEBUG=off
	// console.Debugf(" %d children found, compacting %v\n", total, path)

	// Candidate entries for compaction, ordered by recursive object count.
	leaves := make([]struct {
		objects uint64
		path    dataUsageHash
	}, total)
	// Collect current leaves that have children.
	leaves = leaves[:0]
	remove := total - limit
	var add func(path dataUsageHash)
	add = func(path dataUsageHash) {
		e, ok := d.Cache[path.Key()]
		if !ok {
			return
		}
		if len(e.Children) == 0 {
			// Childless entries are not worth compacting.
			return
		}
		sz := d.sizeRecursive(path.Key())
		leaves = append(leaves, struct {
			objects uint64
			path    dataUsageHash
		}{objects: sz.Objects, path: path})
		for ch := range e.Children {
			add(dataUsageHash(ch))
		}
	}

	// Add path recursively.
	add(path)
	// Smallest subtrees first so we lose the least detail per removal.
	sort.Slice(leaves, func(i, j int) bool {
		return leaves[i].objects < leaves[j].objects
	})
	for remove > 0 && len(leaves) > 0 {
		// Remove top entry.
		e := leaves[0]
		candidate := e.path
		if candidate == path && !compactSelf {
			// We should be the biggest,
			// if we cannot compact ourself, we are done.
			break
		}
		removing := d.totalChildrenRec(candidate.Key())
		flat := d.sizeRecursive(candidate.Key())
		if flat == nil {
			// Candidate vanished (compacted via an ancestor); skip it.
			leaves = leaves[1:]
			continue
		}
		// Appears to be printed with _MINIO_SERVER_DEBUG=off
		// console.Debugf("compacting %v, removing %d children\n", candidate, removing)

		flat.Compacted = true
		d.deleteRecursive(candidate)
		d.replaceHashed(candidate, nil, *flat)

		// Remove top entry and subtract removed children.
		remove -= removing
		leaves = leaves[1:]
	}
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // StringAll returns a detailed string representation of all entries in the cache.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) StringAll() string { | 
					
						
							| 
									
										
										
										
											2021-08-05 00:14:14 +08:00
										 |  |  | 	// Remove bloom filter from print.
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	s := fmt.Sprintf("info:%+v\n", d.Info) | 
					
						
							|  |  |  | 	for k, v := range d.Cache { | 
					
						
							|  |  |  | 		s += fmt.Sprintf("\t%v: %+v\n", k, v) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return strings.TrimSpace(s) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
// String returns a human readable representation of the string.
// The hash is simply the path itself, so no decoding is needed.
func (h dataUsageHash) String() string {
	return string(h)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
// Key returns the key.
// Used to index entries in dataUsageCache.Cache; currently
// identical to String since the hash is the path itself.
func (h dataUsageHash) Key() string {
	return string(h)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-07-14 01:42:11 +08:00
										 |  |  | func (d *dataUsageCache) flattenChildrens(root dataUsageEntry) (m map[string]dataUsageEntry) { | 
					
						
							|  |  |  | 	m = make(map[string]dataUsageEntry) | 
					
						
							|  |  |  | 	for id := range root.Children { | 
					
						
							|  |  |  | 		e := d.Cache[id] | 
					
						
							|  |  |  | 		if len(e.Children) > 0 { | 
					
						
							|  |  |  | 			e = d.flatten(e) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		m[id] = e | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return m | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // flatten all children of the root into the root element and return it.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) flatten(root dataUsageEntry) dataUsageEntry { | 
					
						
							|  |  |  | 	for id := range root.Children { | 
					
						
							|  |  |  | 		e := d.Cache[id] | 
					
						
							|  |  |  | 		if len(e.Children) > 0 { | 
					
						
							|  |  |  | 			e = d.flatten(e) | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		root.merge(e) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	root.Children = nil | 
					
						
							|  |  |  | 	return root | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // add a size to the histogram.
 | 
					
						
							|  |  |  | func (h *sizeHistogram) add(size int64) { | 
					
						
							|  |  |  | 	// Fetch the histogram interval corresponding
 | 
					
						
							|  |  |  | 	// to the passed object size.
 | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 	for i, interval := range ObjectsHistogramIntervals[:] { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		if size >= interval.start && size <= interval.end { | 
					
						
							|  |  |  | 			h[i]++ | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | // toMap returns the map to a map[string]uint64.
 | 
					
						
							|  |  |  | func (h *sizeHistogram) toMap() map[string]uint64 { | 
					
						
							|  |  |  | 	res := make(map[string]uint64, dataUsageBucketLen) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	for i, count := range h { | 
					
						
							|  |  |  | 		res[ObjectsHistogramIntervals[i].name] = count | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return res | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | // add a version count to the histogram.
 | 
					
						
							|  |  |  | func (h *versionsHistogram) add(versions uint64) { | 
					
						
							|  |  |  | 	// Fetch the histogram interval corresponding
 | 
					
						
							|  |  |  | 	// to the passed object size.
 | 
					
						
							|  |  |  | 	for i, interval := range ObjectsVersionCountIntervals[:] { | 
					
						
							|  |  |  | 		if versions >= uint64(interval.start) && versions <= uint64(interval.end) { | 
					
						
							|  |  |  | 			h[i]++ | 
					
						
							|  |  |  | 			break | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // toMap returns the map to a map[string]uint64.
 | 
					
						
							|  |  |  | func (h *versionsHistogram) toMap() map[string]uint64 { | 
					
						
							|  |  |  | 	res := make(map[string]uint64, dataUsageVersionLen) | 
					
						
							|  |  |  | 	for i, count := range h { | 
					
						
							|  |  |  | 		res[ObjectsVersionCountIntervals[i].name] = count | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return res | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | func (d *dataUsageCache) tiersUsageInfo(buckets []BucketInfo) *allTierStats { | 
					
						
							|  |  |  | 	dst := newAllTierStats() | 
					
						
							|  |  |  | 	for _, bucket := range buckets { | 
					
						
							|  |  |  | 		e := d.find(bucket.Name) | 
					
						
							|  |  |  | 		if e == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		flat := d.flatten(*e) | 
					
						
							|  |  |  | 		if flat.AllTierStats == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		dst.merge(flat.AllTierStats) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if len(dst.Tiers) == 0 { | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return dst | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | // bucketsUsageInfo returns the buckets usage info as a map, with
 | 
					
						
							|  |  |  | // key as bucket name
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]BucketUsageInfo { | 
					
						
							| 
									
										
										
										
											2022-01-03 01:15:06 +08:00
										 |  |  | 	dst := make(map[string]BucketUsageInfo, len(buckets)) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	for _, bucket := range buckets { | 
					
						
							|  |  |  | 		e := d.find(bucket.Name) | 
					
						
							|  |  |  | 		if e == nil { | 
					
						
							|  |  |  | 			continue | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		flat := d.flatten(*e) | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 		bui := BucketUsageInfo{ | 
					
						
							| 
									
										
										
										
											2023-03-11 00:53:59 +08:00
										 |  |  | 			Size:                    uint64(flat.Size), | 
					
						
							|  |  |  | 			VersionsCount:           flat.Versions, | 
					
						
							|  |  |  | 			ObjectsCount:            flat.Objects, | 
					
						
							|  |  |  | 			ObjectSizesHistogram:    flat.ObjSizes.toMap(), | 
					
						
							|  |  |  | 			ObjectVersionsHistogram: flat.ObjVersions.toMap(), | 
					
						
							| 
									
										
										
										
											2020-05-27 21:45:43 +08:00
										 |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 		if flat.ReplicationStats != nil { | 
					
						
							|  |  |  | 			bui.ReplicaSize = flat.ReplicationStats.ReplicaSize | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 			bui.ReplicationInfo = make(map[string]BucketTargetUsageInfo, len(flat.ReplicationStats.Targets)) | 
					
						
							|  |  |  | 			for arn, stat := range flat.ReplicationStats.Targets { | 
					
						
							|  |  |  | 				bui.ReplicationInfo[arn] = BucketTargetUsageInfo{ | 
					
						
							|  |  |  | 					ReplicationPendingSize:  stat.PendingSize, | 
					
						
							|  |  |  | 					ReplicatedSize:          stat.ReplicatedSize, | 
					
						
							|  |  |  | 					ReplicationFailedSize:   stat.FailedSize, | 
					
						
							|  |  |  | 					ReplicationPendingCount: stat.PendingCount, | 
					
						
							|  |  |  | 					ReplicationFailedCount:  stat.FailedCount, | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		dst[bucket.Name] = bui | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return dst | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // sizeRecursive returns the path as a flattened entry.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry { | 
					
						
							|  |  |  | 	root := d.find(path) | 
					
						
							|  |  |  | 	if root == nil || len(root.Children) == 0 { | 
					
						
							|  |  |  | 		return root | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	flat := d.flatten(*root) | 
					
						
							|  |  |  | 	return &flat | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | // totalChildrenRec returns the total number of children recorded.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) totalChildrenRec(path string) int { | 
					
						
							|  |  |  | 	root := d.find(path) | 
					
						
							|  |  |  | 	if root == nil || len(root.Children) == 0 { | 
					
						
							|  |  |  | 		return 0 | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	n := len(root.Children) | 
					
						
							|  |  |  | 	for ch := range root.Children { | 
					
						
							|  |  |  | 		n += d.totalChildrenRec(ch) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return n | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | // root returns the root of the cache.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) root() *dataUsageEntry { | 
					
						
							|  |  |  | 	return d.find(d.Info.Name) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // rootHash returns the root of the cache.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) rootHash() dataUsageHash { | 
					
						
							|  |  |  | 	return hashPath(d.Info.Name) | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // clone returns a copy of the cache with no references to the existing.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) clone() dataUsageCache { | 
					
						
							|  |  |  | 	clone := dataUsageCache{ | 
					
						
							|  |  |  | 		Info:  d.Info, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		Cache: make(map[string]dataUsageEntry, len(d.Cache)), | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	for k, v := range d.Cache { | 
					
						
							| 
									
										
										
										
											2021-08-24 22:11:38 +08:00
										 |  |  | 		clone.Cache[k] = v.clone() | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return clone | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // merge root of other into d.
 | 
					
						
							|  |  |  | // children of root will be flattened before being merged.
 | 
					
						
							|  |  |  | // Last update time will be set to the last updated.
 | 
					
						
							|  |  |  | func (d *dataUsageCache) merge(other dataUsageCache) { | 
					
						
							|  |  |  | 	existingRoot := d.root() | 
					
						
							|  |  |  | 	otherRoot := other.root() | 
					
						
							|  |  |  | 	if existingRoot == nil && otherRoot == nil { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if otherRoot == nil { | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if existingRoot == nil { | 
					
						
							|  |  |  | 		*d = other.clone() | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	if other.Info.LastUpdate.After(d.Info.LastUpdate) { | 
					
						
							|  |  |  | 		d.Info.LastUpdate = other.Info.LastUpdate | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	existingRoot.merge(*otherRoot) | 
					
						
							|  |  |  | 	eHash := d.rootHash() | 
					
						
							|  |  |  | 	for key := range otherRoot.Children { | 
					
						
							|  |  |  | 		entry := other.Cache[key] | 
					
						
							|  |  |  | 		flat := other.flatten(entry) | 
					
						
							|  |  |  | 		existing := d.Cache[key] | 
					
						
							|  |  |  | 		// If not found, merging simply adds.
 | 
					
						
							|  |  |  | 		existing.merge(flat) | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		d.replaceHashed(dataUsageHash(key), &eHash, existing) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-09-11 00:18:19 +08:00
// objectIO is the minimal object-store surface needed to persist and
// retrieve data usage caches as objects (used by load and save below).
type objectIO interface {
	// GetObjectNInfo fetches an object and returns a reader over its contents.
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (reader *GetObjectReader, err error)
	// PutObject stores an object from the supplied reader.
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// load the cache content with name from minioMetaBackgroundOpsBucket.
// Only backend errors are returned as errors.
// The loader is optimistic and has no locking, but tries 5 times before giving up.
// If the object is not found or unable to deserialize d is cleared and nil error is returned.
func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
	// Abandon if more than 5 minutes, so we don't hold up scanner.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()

	// Caches are read+written without locks,
	retries := 0
	for retries < 5 {
		r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
		if err != nil {
			switch err.(type) {
			case ObjectNotFound, BucketNotFound:
				// No cache exists yet: fall through and reset d below.
			case InsufficientReadQuorum, StorageErr:
				// Transient backend condition: back off for a random
				// sub-second interval and retry.
				retries++
				time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
				continue
			default:
				// Any other backend error is surfaced to the caller.
				return toObjectErr(err, dataUsageBucket, name)
			}
			*d = dataUsageCache{}
			return nil
		}
		if err := d.deserialize(r); err != nil {
			// Deserialization failed (e.g. a concurrent, lock-free
			// writer left the object in flux): close, back off, retry.
			r.Close()
			retries++
			time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
			continue
		}
		r.Close()
		return nil
	}
	// All retries exhausted: treat as no cache, per the contract above.
	*d = dataUsageCache{}
	return nil
}
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2023-03-10 07:15:46 +08:00
// Maximum running concurrent saves on server.
// Used as a counting semaphore by dataUsageCache.save: send to acquire,
// receive to release; capacity bounds the number of concurrent saves.
var maxConcurrentScannerSaves = make(chan struct{}, 4)
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
// save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
// Note that no locking is done when saving.
func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
	var r io.Reader

	// Acquire a slot on the counting semaphore to bound concurrent saves;
	// released when this function returns.
	maxConcurrentScannerSaves <- struct{}{}
	defer func() {
		<-maxConcurrentScannerSaves
	}()

	// If big, do streaming...
	size := int64(-1)
	if len(d.Cache) > 10000 {
		// Serialize through a pipe so the full encoded cache is never
		// held in memory; the size stays unknown (-1).
		pr, pw := io.Pipe()
		go func() {
			// Propagate any serialization error to the pipe reader.
			pw.CloseWithError(d.serializeTo(pw))
		}()
		defer pr.Close()
		r = pr
	} else {
		// Small cache: serialize fully into memory so the exact size
		// can be given to the hash reader below.
		var buf bytes.Buffer
		err := d.serializeTo(&buf)
		if err != nil {
			return err
		}
		r = &buf
		size = int64(buf.Len())
	}

	hr, err := hash.NewReader(r, size, "", "", size)
	if err != nil {
		return err
	}

	// Abandon if more than 5 minutes, so we don't hold up scanner.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
	defer cancel()
	_, err = store.PutObject(ctx,
		dataUsageBucket,
		name,
		NewPutObjReader(hr),
		ObjectOptions{NoLock: true})
	if isErrBucketNotFound(err) {
		// A missing dataUsageBucket is not treated as an error here.
		return nil
	}
	return err
}
					
						
							|  |  |  | 
 | 
					
						
// dataUsageCacheVer indicates the cache version.
// Bumping the cache version will drop data from previous versions
// and write new data with the new version.
const (
	// dataUsageCacheVerCurrent is written by serializeTo.
	dataUsageCacheVerCurrent = 7
	// Older versions are kept so deserialize can upgrade
	// caches written by previous releases.
	dataUsageCacheVerV6 = 6
	dataUsageCacheVerV5 = 5
	dataUsageCacheVerV4 = 4
	dataUsageCacheVerV3 = 3
	dataUsageCacheVerV2 = 2
	dataUsageCacheVerV1 = 1
)
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // serialize the contents of the cache.
 | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | func (d *dataUsageCache) serializeTo(dst io.Writer) error { | 
					
						
							|  |  |  | 	// Add version and compress.
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	_, err := dst.Write([]byte{dataUsageCacheVerCurrent}) | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	enc, err := zstd.NewWriter(dst, | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		zstd.WithEncoderLevel(zstd.SpeedFastest), | 
					
						
							|  |  |  | 		zstd.WithWindowSize(1<<20), | 
					
						
							|  |  |  | 		zstd.WithEncoderConcurrency(2)) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	mEnc := msgp.NewWriter(enc) | 
					
						
							|  |  |  | 	err = d.EncodeMsg(mEnc) | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 		return err | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	err = mEnc.Flush() | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	err = enc.Close() | 
					
						
							|  |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 		return err | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-12-11 05:03:22 +08:00
										 |  |  | 	return nil | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // deserialize the supplied byte slice into the cache.
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | func (d *dataUsageCache) deserialize(r io.Reader) error { | 
					
						
							|  |  |  | 	var b [1]byte | 
					
						
							|  |  |  | 	n, _ := r.Read(b[:]) | 
					
						
							|  |  |  | 	if n != 1 { | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		return io.ErrUnexpectedEOF | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-05-12 23:07:02 +08:00
										 |  |  | 	ver := int(b[0]) | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	switch ver { | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 	case dataUsageCacheVerV1: | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		return errors.New("cache version deprecated (will autoupdate)") | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 	case dataUsageCacheVerV2: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 		dold := &dataUsageCacheV2{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			d.Cache[k] = dataUsageEntry{ | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 				Size:      v.Size, | 
					
						
							|  |  |  | 				Objects:   v.Objects, | 
					
						
							|  |  |  | 				ObjSizes:  v.ObjSizes, | 
					
						
							|  |  |  | 				Children:  v.Children, | 
					
						
							|  |  |  | 				Compacted: len(v.Children) == 0 && k != d.Info.Name, | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV3: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 		dold := &dataUsageCacheV3{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			due := dataUsageEntry{ | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 				Size:     v.Size, | 
					
						
							|  |  |  | 				Objects:  v.Objects, | 
					
						
							|  |  |  | 				ObjSizes: v.ObjSizes, | 
					
						
							|  |  |  | 				Children: v.Children, | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 			if v.ReplicatedSize > 0 || v.ReplicaSize > 0 || v.ReplicationFailedSize > 0 || v.ReplicationPendingSize > 0 { | 
					
						
							| 
									
										
										
										
											2021-11-20 06:46:14 +08:00
										 |  |  | 				cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name) | 
					
						
							|  |  |  | 				if cfg != nil && cfg.RoleArn != "" { | 
					
						
							|  |  |  | 					due.ReplicationStats = &replicationAllStats{ | 
					
						
							|  |  |  | 						Targets: make(map[string]replicationStats), | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					due.ReplicationStats.ReplicaSize = v.ReplicaSize | 
					
						
							|  |  |  | 					due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{ | 
					
						
							|  |  |  | 						ReplicatedSize: v.ReplicatedSize, | 
					
						
							|  |  |  | 						FailedSize:     v.ReplicationFailedSize, | 
					
						
							|  |  |  | 						PendingSize:    v.ReplicationPendingSize, | 
					
						
							|  |  |  | 					} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 			} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			due.Compacted = len(due.Children) == 0 && k != d.Info.Name | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							| 
									
										
										
										
											2021-04-04 00:03:42 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV4: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 		dold := &dataUsageCacheV4{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			due := dataUsageEntry{ | 
					
						
							|  |  |  | 				Size:     v.Size, | 
					
						
							|  |  |  | 				Objects:  v.Objects, | 
					
						
							|  |  |  | 				ObjSizes: v.ObjSizes, | 
					
						
							|  |  |  | 				Children: v.Children, | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2021-09-22 00:01:51 +08:00
										 |  |  | 			empty := replicationStatsV1{} | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			if v.ReplicationStats != empty { | 
					
						
							| 
									
										
										
										
											2021-11-20 06:46:14 +08:00
										 |  |  | 				cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name) | 
					
						
							|  |  |  | 				if cfg != nil && cfg.RoleArn != "" { | 
					
						
							|  |  |  | 					due.ReplicationStats = &replicationAllStats{ | 
					
						
							|  |  |  | 						Targets: make(map[string]replicationStats), | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{ | 
					
						
							|  |  |  | 						ReplicatedSize: v.ReplicationStats.ReplicatedSize, | 
					
						
							|  |  |  | 						FailedSize:     v.ReplicationStats.FailedSize, | 
					
						
							|  |  |  | 						FailedCount:    v.ReplicationStats.FailedCount, | 
					
						
							|  |  |  | 						PendingSize:    v.ReplicationStats.PendingSize, | 
					
						
							|  |  |  | 						PendingCount:   v.ReplicationStats.PendingCount, | 
					
						
							|  |  |  | 					} | 
					
						
							|  |  |  | 					due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize | 
					
						
							| 
									
										
										
										
											2021-09-22 00:01:51 +08:00
										 |  |  | 				} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			} | 
					
						
							|  |  |  | 			due.Compacted = len(due.Children) == 0 && k != d.Info.Name | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 		// Populate compacted value and remove unneeded replica stats.
 | 
					
						
							|  |  |  | 		for k, e := range d.Cache { | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 			if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 { | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 				e.ReplicationStats = nil | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 			d.Cache[k] = e | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		return nil | 
					
						
							|  |  |  | 	case dataUsageCacheVerV5: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							|  |  |  | 		dold := &dataUsageCacheV5{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			due := dataUsageEntry{ | 
					
						
							|  |  |  | 				Size:     v.Size, | 
					
						
							|  |  |  | 				Objects:  v.Objects, | 
					
						
							|  |  |  | 				ObjSizes: v.ObjSizes, | 
					
						
							|  |  |  | 				Children: v.Children, | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			if v.ReplicationStats != nil && !v.ReplicationStats.Empty() { | 
					
						
							| 
									
										
										
										
											2021-11-20 06:46:14 +08:00
										 |  |  | 				cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name) | 
					
						
							|  |  |  | 				if cfg != nil && cfg.RoleArn != "" { | 
					
						
							|  |  |  | 					due.ReplicationStats = &replicationAllStats{ | 
					
						
							|  |  |  | 						Targets: make(map[string]replicationStats), | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 					d.Info.replication = replicationConfig{Config: cfg} | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-11-20 06:46:14 +08:00
										 |  |  | 					due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{ | 
					
						
							| 
									
										
										
										
											2021-09-22 00:01:51 +08:00
										 |  |  | 						ReplicatedSize: v.ReplicationStats.ReplicatedSize, | 
					
						
							|  |  |  | 						FailedSize:     v.ReplicationStats.FailedSize, | 
					
						
							|  |  |  | 						FailedCount:    v.ReplicationStats.FailedCount, | 
					
						
							|  |  |  | 						PendingSize:    v.ReplicationStats.PendingSize, | 
					
						
							| 
									
										
										
										
											2021-09-23 01:48:45 +08:00
										 |  |  | 						PendingCount:   v.ReplicationStats.PendingCount, | 
					
						
							| 
									
										
										
										
											2021-09-22 00:01:51 +08:00
										 |  |  | 					} | 
					
						
							|  |  |  | 					due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			due.Compacted = len(due.Children) == 0 && k != d.Info.Name | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2021-09-19 04:31:35 +08:00
										 |  |  | 		// Populate compacted value and remove unneeded replica stats.
 | 
					
						
							|  |  |  | 		for k, e := range d.Cache { | 
					
						
							|  |  |  | 			if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 { | 
					
						
							|  |  |  | 				e.ReplicationStats = nil | 
					
						
							|  |  |  | 			} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 			d.Cache[k] = e | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2021-05-12 23:07:02 +08:00
										 |  |  | 		return nil | 
					
						
							| 
									
										
										
										
											2021-10-24 09:38:33 +08:00
										 |  |  | 	case dataUsageCacheVerV6: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							|  |  |  | 		dold := &dataUsageCacheV6{} | 
					
						
							|  |  |  | 		if err = dold.DecodeMsg(msgp.NewReader(dec)); err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		d.Info = dold.Info | 
					
						
							|  |  |  | 		d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) | 
					
						
							|  |  |  | 		for k, v := range dold.Cache { | 
					
						
							|  |  |  | 			var replicationStats *replicationAllStats | 
					
						
							|  |  |  | 			if v.ReplicationStats != nil { | 
					
						
							|  |  |  | 				replicationStats = &replicationAllStats{ | 
					
						
							|  |  |  | 					Targets:     v.ReplicationStats.Targets, | 
					
						
							|  |  |  | 					ReplicaSize: v.ReplicationStats.ReplicaSize, | 
					
						
							|  |  |  | 				} | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			due := dataUsageEntry{ | 
					
						
							|  |  |  | 				Children:         v.Children, | 
					
						
							|  |  |  | 				Size:             v.Size, | 
					
						
							|  |  |  | 				Objects:          v.Objects, | 
					
						
							|  |  |  | 				Versions:         v.Versions, | 
					
						
							|  |  |  | 				ObjSizes:         v.ObjSizes, | 
					
						
							|  |  |  | 				ReplicationStats: replicationStats, | 
					
						
							|  |  |  | 				Compacted:        v.Compacted, | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			d.Cache[k] = due | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		return nil | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	case dataUsageCacheVerCurrent: | 
					
						
							|  |  |  | 		// Zstd compressed.
 | 
					
						
							|  |  |  | 		dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(2)) | 
					
						
							|  |  |  | 		if err != nil { | 
					
						
							|  |  |  | 			return err | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 		defer dec.Close() | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 		return d.DecodeMsg(msgp.NewReader(dec)) | 
					
						
							| 
									
										
										
										
											2021-05-12 23:07:02 +08:00
										 |  |  | 	default: | 
					
						
							|  |  |  | 		return fmt.Errorf("dataUsageCache: unknown version: %d", ver) | 
					
						
							| 
									
										
										
										
											2021-01-14 01:58:08 +08:00
										 |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
// hashPathCutSet is the character cutset trimmed from the start and end
// of paths before hashing (see hashPath). It begins as dataUsageRoot;
// init() extends it with the OS path separator when the root is not the
// separator itself.
var hashPathCutSet = dataUsageRoot
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | func init() { | 
					
						
							|  |  |  | 	if dataUsageRoot != string(filepath.Separator) { | 
					
						
							|  |  |  | 		hashPathCutSet = dataUsageRoot + string(filepath.Separator) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // hashPath calculates a hash of the provided string.
 | 
					
						
							|  |  |  | func hashPath(data string) dataUsageHash { | 
					
						
							|  |  |  | 	if data != dataUsageRoot { | 
					
						
							|  |  |  | 		data = strings.Trim(data, hashPathCutSet) | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	return dataUsageHash(path.Clean(data)) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
//msgp:ignore dataUsageHashMap

// dataUsageHashMap is a set of hash strings. On the wire it is encoded
// as a msgpack array of strings by the hand-written msgp methods below
// (codegen is suppressed by the directive above).
type dataUsageHashMap map[string]struct{}
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | // DecodeMsg implements msgp.Decodable
 | 
					
						
							|  |  |  | func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) { | 
					
						
							|  |  |  | 	var zb0002 uint32 | 
					
						
							|  |  |  | 	zb0002, err = dc.ReadArrayHeader() | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 		err = msgp.WrapError(err) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	if zb0002 == 0 { | 
					
						
							|  |  |  | 		*z = nil | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	*z = make(dataUsageHashMap, zb0002) | 
					
						
							|  |  |  | 	for i := uint32(0); i < zb0002; i++ { | 
					
						
							|  |  |  | 		{ | 
					
						
							|  |  |  | 			var zb0003 string | 
					
						
							|  |  |  | 			zb0003, err = dc.ReadString() | 
					
						
							|  |  |  | 			if err != nil { | 
					
						
							|  |  |  | 				err = msgp.WrapError(err) | 
					
						
							|  |  |  | 				return | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			(*z)[zb0003] = struct{}{} | 
					
						
							|  |  |  | 		} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	} | 
					
						
							|  |  |  | 	return | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | // EncodeMsg implements msgp.Encodable
 | 
					
						
							|  |  |  | func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) { | 
					
						
							|  |  |  | 	err = en.WriteArrayHeader(uint32(len(z))) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		err = msgp.WrapError(err) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	for zb0004 := range z { | 
					
						
							|  |  |  | 		err = en.WriteString(zb0004) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		if err != nil { | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 			err = msgp.WrapError(err, zb0004) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 			return | 
					
						
							|  |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	return | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // MarshalMsg implements msgp.Marshaler
 | 
					
						
							|  |  |  | func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) { | 
					
						
							|  |  |  | 	o = msgp.Require(b, z.Msgsize()) | 
					
						
							|  |  |  | 	o = msgp.AppendArrayHeader(o, uint32(len(z))) | 
					
						
							|  |  |  | 	for zb0004 := range z { | 
					
						
							|  |  |  | 		o = msgp.AppendString(o, zb0004) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 
 | 
					
						
							|  |  |  | // UnmarshalMsg implements msgp.Unmarshaler
 | 
					
						
							|  |  |  | func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) { | 
					
						
							|  |  |  | 	var zb0002 uint32 | 
					
						
							|  |  |  | 	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 	if err != nil { | 
					
						
							|  |  |  | 		err = msgp.WrapError(err) | 
					
						
							|  |  |  | 		return | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2021-05-12 09:36:15 +08:00
										 |  |  | 	if zb0002 == 0 { | 
					
						
							|  |  |  | 		*z = nil | 
					
						
							|  |  |  | 		return bts, nil | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	*z = make(dataUsageHashMap, zb0002) | 
					
						
							|  |  |  | 	for i := uint32(0); i < zb0002; i++ { | 
					
						
							|  |  |  | 		{ | 
					
						
							|  |  |  | 			var zb0003 string | 
					
						
							|  |  |  | 			zb0003, bts, err = msgp.ReadStringBytes(bts) | 
					
						
							|  |  |  | 			if err != nil { | 
					
						
							|  |  |  | 				err = msgp.WrapError(err) | 
					
						
							|  |  |  | 				return | 
					
						
							|  |  |  | 			} | 
					
						
							|  |  |  | 			(*z)[zb0003] = struct{}{} | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | 		} | 
					
						
							|  |  |  | 	} | 
					
						
							| 
									
										
										
										
											2020-06-13 01:28:21 +08:00
										 |  |  | 	o = bts | 
					
						
							|  |  |  | 	return | 
					
						
							|  |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 | 
					
						
							|  |  |  | func (z dataUsageHashMap) Msgsize() (s int) { | 
					
						
							|  |  |  | 	s = msgp.ArrayHeaderSize | 
					
						
							|  |  |  | 	for zb0004 := range z { | 
					
						
							|  |  |  | 		s += msgp.StringPrefixSize + len(zb0004) | 
					
						
							|  |  |  | 	} | 
					
						
							|  |  |  | 	return | 
					
						
							| 
									
										
										
										
											2020-03-19 07:19:29 +08:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2022-07-06 05:45:49 +08:00
										 |  |  | 
 | 
					
						
//msgp:encode ignore currentScannerCycle
//msgp:decode ignore currentScannerCycle

// currentScannerCycle tracks scanner cycle state: presumably the current
// and next cycle numbers, the start time of the current cycle, and
// completion timestamps of finished cycles — confirm against the scanner
// code that updates it. All fields are unexported; the msgp directives
// above exclude the type from streaming encode/decode generation.
type currentScannerCycle struct {
	current        uint64
	next           uint64
	started        time.Time
	cycleCompleted []time.Time
}
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | // clone returns a clone.
 | 
					
						
							|  |  |  | func (z currentScannerCycle) clone() currentScannerCycle { | 
					
						
							|  |  |  | 	z.cycleCompleted = append(make([]time.Time, 0, len(z.cycleCompleted)), z.cycleCompleted...) | 
					
						
							|  |  |  | 	return z | 
					
						
							|  |  |  | } |