mirror of https://github.com/minio/minio.git

Compare commits 177a6c5182 ... 884288ca36 (6 commits)

Commits (SHA1):
884288ca36
f1dff34ccc
b9f0e8c712
7ced9663e6
50fcf9b670
64f5c6103f
@@ -223,7 +223,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
        if failedClients[idx] {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.CommitBinary(ctx)
        }, idx, *client.host)

@@ -269,7 +269,6 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
        if failedClients[idx] {
            continue
        }
        client := client
        ng.Go(ctx, func() error {
            prs, ok := peerResults[client.String()]
            // We restart only on success, not for any failures.

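Most hunks in this comparison delete per-iteration copies such as `client := client`, `idx := idx`, and `index := index`. Since Go 1.22, every iteration of a `for` loop gets a fresh copy of its loop variables, so closures started from the loop body no longer share a single variable and these manual copies are redundant. A minimal, self-contained sketch of the behavior the copies used to guard against (names here are illustrative, not taken from the MinIO sources):

```go
package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    results := make([]int, 3)

    for idx, v := range []int{10, 20, 30} {
        // Before Go 1.22, `idx := idx` and `v := v` were needed here because
        // every goroutine captured the same loop variables. With Go 1.22+
        // semantics each iteration has its own idx and v.
        wg.Add(1)
        go func() {
            defer wg.Done()
            results[idx] = v
        }()
    }

    wg.Wait()
    fmt.Println(results) // [10 20 30]
}
```
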
@@ -154,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
    g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)

    for index := range bucketsToBeUpdatedSlice {
        index := index
        g.Go(func() error {
            return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
        }, index)

@@ -511,7 +511,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
    g := errgroup.WithNErrs(len(buckets))
    bucketMetas := make([]BucketMetadata, len(buckets))
    for index := range buckets {
        index := index
        g.Go(func() error {
            // Sleep and stagger to avoid blocked CPU and thundering
            // herd upon start up sequence.

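The `errgroup.WithNErrs(n)` / `g.Go(fn, index)` calls throughout these hunks come from MinIO's internal errgroup package, which runs one task per index and records one error per slot. As a rough stand-in (not the MinIO API), the same fan-out and per-index error collection can be sketched with only the standard library:

```go
package main

import (
    "fmt"
    "sync"
)

// fanOut runs fn once per index and returns one error slot per index,
// mirroring the per-disk / per-peer pattern in the hunks above.
func fanOut(n int, fn func(i int) error) []error {
    errs := make([]error, n)
    var wg sync.WaitGroup
    for i := 0; i < n; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            errs[i] = fn(i) // i is per-iteration in Go 1.22+
        }()
    }
    wg.Wait()
    return errs
}

func main() {
    errs := fanOut(4, func(i int) error {
        if i == 2 {
            return fmt.Errorf("disk %d offline", i)
        }
        return nil
    })
    fmt.Println(errs) // [<nil> <nil> disk 2 offline <nil>]
}
```
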
@@ -2555,7 +2555,7 @@ func proxyTaggingToRepTarget(ctx context.Context, bucket, object string, tags *t
        if tgt.disableProxy {
            continue
        }
        idx := idx

        wg.Add(1)
        go func(idx int, tgt *TargetClient) {
            defer wg.Done()

@@ -2624,7 +2624,7 @@ func proxyGetTaggingToRepTarget(ctx context.Context, bucket, object string, opts
        if tgt.disableProxy {
            continue
        }
        idx := idx

        wg.Add(1)
        go func(idx int, tgt *TargetClient) {
            defer wg.Done()

@@ -116,7 +116,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
    g := errgroup.WithNErrs(len(storageDisks))
    for index := range storageDisks {
        index := index
        g.Go(func() error {
            if storageDisks[index] == nil {
                // we ignore disk not found errors

@@ -868,7 +867,7 @@ func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix
        if disk == nil {
            continue
        }
        index := index

        g.Go(func() error {
            entries, err := storageDisks[index].ListDir(ctx, "", bucket, prefix, 1)
            if err != nil {

@@ -204,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
    g := errgroup.WithNErrs(len(disks))
    // Read `xl.meta` in parallel across disks.
    for index := range disks {
        index := index
        g.Go(func() (err error) {
            if disks[index] == nil {
                return errDiskNotFound

@@ -408,7 +408,6 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc

    // Start writing `xl.meta` to all disks in parallel.
    for index := range disks {
        index := index
        g.Go(func() error {
            if disks[index] == nil {
                return errDiskNotFound

@@ -433,7 +432,7 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc
        if mErrs[index] != nil {
            continue
        }
        index := index

        ng.Go(func() error {
            if disks[index] == nil {
                return errDiskNotFound

@@ -121,7 +121,7 @@ func (er erasureObjects) cleanupMultipartPath(ctx context.Context, paths ...stri
        if disk == nil {
            continue
        }
        index := index

        g.Go(func() error {
            _ = storageDisks[index].DeleteBulk(ctx, minioMetaMultipartBucket, paths...)
            return nil

@@ -540,7 +540,6 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src

    // Rename file on all underlying storage disks.
    for index := range disks {
        index := index
        g.Go(func() error {
            if disks[index] == nil {
                return errDiskNotFound

@@ -820,7 +819,6 @@ func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI
    objectParts := make([][]string, len(onlineDisks))
    // List uploaded parts from drives.
    for index := range onlineDisks {
        index := index
        g.Go(func() (err error) {
            if onlineDisks[index] == nil {
                return errDiskNotFound

@@ -995,7 +993,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
    objectPartInfos := make([][]*ObjectPartInfo, len(disks))
    // Rename file on all underlying storage disks.
    for index := range disks {
        index := index
        g.Go(func() (err error) {
            if disks[index] == nil {
                return errDiskNotFound

@@ -542,7 +542,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
    disks := er.getDisks()
    g := errgroup.WithNErrs(len(disks))
    for index := range disks {
        index := index
        g.Go(func() error {
            if disks[index] == nil {
                return errDiskNotFound

@@ -575,7 +574,6 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
    rawFileInfos := make([]RawFileInfo, len(disks))
    g := errgroup.WithNErrs(len(disks))
    for index := range disks {
        index := index
        g.Go(func() (err error) {
            if disks[index] == nil {
                return errDiskNotFound

@@ -828,6 +826,13 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
        minDisks = er.setDriveCount - er.defaultParityCount
    }

    if minDisks == er.setDriveCount/2 {
        // when data and parity are same we must atleast
        // wait for response from 1 extra drive to avoid
        // split-brain.
        minDisks++
    }

    calcQuorum := func(metaArr []FileInfo, errs []error) (FileInfo, []FileInfo, []StorageAPI, time.Time, string, error) {
        readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
        if err != nil {

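The added `minDisks == er.setDriveCount/2` check encodes a read-quorum rule: when data and parity shards are equal, waiting for exactly half the drives would let two disjoint halves of the erasure set each claim a quorum, so one extra response is required. A worked example with illustrative numbers (4 drives, parity 2):

```go
package main

import "fmt"

func main() {
    // Illustrative numbers only, not taken from a real deployment.
    setDriveCount := 4
    defaultParityCount := 2

    minDisks := setDriveCount - defaultParityCount // 2 data shards
    if minDisks == setDriveCount/2 {
        // Two disjoint groups of 2 drives could otherwise both reach quorum;
        // requiring 3 of 4 forces any two quorums to overlap in one drive.
        minDisks++
    }
    fmt.Println("read quorum:", minDisks) // read quorum: 3
}
```
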
@@ -1022,7 +1027,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
    dataDirs := make([]string, len(disks))
    // Rename file on all underlying storage disks.
    for index := range disks {
        index := index
        g.Go(func() error {
            if disks[index] == nil {
                return errDiskNotFound

@@ -1058,7 +1062,7 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
        if nerr != nil {
            continue
        }
        index := index

        // When we are going to return error, attempt to delete success
        // on some of the drives, if we cannot we do not have to notify
        // caller this dangling object will be now scheduled to be removed

@@ -1624,7 +1628,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object

    g := errgroup.WithNErrs(len(disks))
    for index := range disks {
        index := index
        g.Go(func() error {
            if disks[index] == nil {
                return errDiskNotFound

@@ -1829,7 +1832,6 @@ func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object
    }
    g := errgroup.WithNErrs(len(onlineDisks))
    for index := range onlineDisks {
        index := index
        g.Go(func() error {
            if onlineDisks[index] == nil {
                return nil

@@ -1855,7 +1857,6 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string

    g := errgroup.WithNErrs(len(disks))
    for index := range disks {
        index := index
        g.Go(func() error {
            if disks[index] == nil {
                return nil

@@ -2307,7 +2308,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o

    // Start writing `xl.meta` to all disks in parallel.
    for index := range onlineDisks {
        index := index
        g.Go(func() error {
            if onlineDisks[index] == nil {
                return errDiskNotFound

@@ -420,7 +420,7 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b
    nSets := make([]int, len(z.serverPools))
    g := errgroup.WithNErrs(len(z.serverPools))
    for index := range z.serverPools {
        index := index

        // Skip suspended pools or pools participating in rebalance for any new
        // I/O.
        if z.IsSuspended(index) || z.IsPoolRebalancing(index) {

@@ -660,7 +660,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
    g := errgroup.WithNErrs(len(z.serverPools))

    for index := range z.serverPools {
        index := index
        g.Go(func() error {
            return z.serverPools[index].Shutdown(ctx)
        }, index)

@@ -712,7 +711,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool)
    storageInfos := make([]StorageInfo, len(z.serverPools))
    g := errgroup.WithNErrs(len(z.serverPools))
    for index := range z.serverPools {
        index := index
        g.Go(func() error {
            storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics)
            return nil

@@ -593,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo {

    g := errgroup.WithNErrs(len(s.sets))
    for index := range s.sets {
        index := index
        g.Go(func() error {
            storageInfos[index] = s.sets[index].StorageInfo(ctx)
            return nil

@@ -618,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag

    g := errgroup.WithNErrs(len(s.sets))
    for index := range s.sets {
        index := index
        g.Go(func() error {
            storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics)
            return nil

@@ -641,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
    g := errgroup.WithNErrs(len(s.sets))

    for index := range s.sets {
        index := index
        g.Go(func() error {
            return s.sets[index].Shutdown(ctx)
        }, index)

@@ -705,7 +702,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
    g := errgroup.WithNErrs(len(storageDisks))
    for index := range storageDisks {
        index := index
        g.Go(func() error {
            if storageDisks[index] == nil {
                // we ignore disk not found errors

@@ -175,7 +175,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks

    g := errgroup.WithNErrs(len(disks))
    for index := range disks {
        index := index
        g.Go(func() error {
            di := madmin.Disk{
                Endpoint: endpoints[index].String(),

@@ -324,7 +324,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur

    // Load format from each disk in parallel
    for index := range storageDisks {
        index := index
        g.Go(func() error {
            if storageDisks[index] == nil {
                return errDiskNotFound

@@ -530,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format

    // Write `format.json` to all disks.
    for index := range storageDisks {
        index := index
        g.Go(func() error {
            if formats[index] == nil {
                return errDiskNotFound

@@ -566,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag
    storageDisks := make([]StorageAPI, len(endpoints))
    g := errgroup.WithNErrs(len(endpoints))
    for index := range endpoints {
        index := index
        g.Go(func() (err error) {
            storageDisks[index], err = newStorageAPI(endpoints[index], opts)
            return err

@@ -294,7 +294,6 @@ func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IA
    g := errgroup.WithNErrs(len(users))

    for index := range users {
        index := index
        g.Go(func() error {
            userName := path.Dir(users[index])
            user, err := iamOS.loadUserIdentity(ctx, userName, userType)

@@ -413,7 +412,6 @@ func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, use
    g := errgroup.WithNErrs(len(users))

    for index := range users {
        index := index
        g.Go(func() error {
            userName := strings.TrimSuffix(users[index], ".json")
            userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup)

@@ -538,7 +536,6 @@ func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, polici
    g := errgroup.WithNErrs(len(policies))

    for index := range policies {
        index := index
        g.Go(func() error {
            policyName := path.Dir(policies[index])
            policyDoc, err := iamOS.loadPolicy(ctx, policyName)

@@ -400,7 +400,6 @@ func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool,
    g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time.

    for index := range groups {
        index := index
        g.Go(func() error {
            err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap)
            if err != nil && !errors.Is(err, errNoSuchPolicy) {

cmd/iam.go
@@ -24,6 +24,7 @@ import (
    "encoding/json"
    "errors"
    "fmt"
    "maps"
    "math/rand"
    "path"
    "sort"

@@ -366,14 +367,11 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
    sys.rolesMap = make(map[arn.ARN]string)

    // From OpenID
-   if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil {
-       sys.validateAndAddRolePolicyMappings(ctx, riMap)
-   }
+   maps.Copy(sys.rolesMap, sys.OpenIDConfig.GetRoleInfo())

    // From AuthN plugin if enabled.
    if authn := newGlobalAuthNPluginFn(); authn != nil {
-       riMap := authn.GetRoleInfo()
-       sys.validateAndAddRolePolicyMappings(ctx, riMap)
+       maps.Copy(sys.rolesMap, authn.GetRoleInfo())
    }

    sys.printIAMRoles()

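The Init hunk above replaces the per-source `validateAndAddRolePolicyMappings` calls with `maps.Copy` from the standard `maps` package (hence the new import), which inserts or overwrites every key/value pair from the source map into the destination. A minimal illustration with made-up keys, using plain strings instead of the real `arn.ARN` type:

```go
package main

import (
    "fmt"
    "maps"
)

func main() {
    rolesMap := map[string]string{"arn:minio:iam:::role/a": "policyA"}
    fromOpenID := map[string]string{"arn:minio:iam:::role/b": "policyB"}

    // maps.Copy(dst, src) sets dst[k] = v for every key/value pair in src.
    maps.Copy(rolesMap, fromOpenID)
    fmt.Println(len(rolesMap)) // 2
}
```
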
@@ -501,33 +499,6 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
        }
    }

-func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
-   // Validate that policies associated with roles are defined. If
-   // authZ plugin is set, role policies are just claims sent to
-   // the plugin and they need not exist.
-   //
-   // If some mapped policies do not exist, we print some error
-   // messages but continue any way - they can be fixed in the
-   // running server by creating the policies after start up.
-   for arn, rolePolicies := range m {
-       specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet()
-       validPolicies, _ := sys.store.MergePolicies(rolePolicies)
-       knownPoliciesSet := newMappedPolicy(validPolicies).policySet()
-       unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet)
-       if len(unknownPoliciesSet) > 0 {
-           authz := newGlobalAuthZPluginFn()
-           if authz == nil {
-               // Print a warning that some policies mapped to a role are not defined.
-               errMsg := fmt.Errorf(
-                   "The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.",
-                   unknownPoliciesSet.ToSlice(), arn.String())
-               authZLogIf(ctx, errMsg, logger.WarningKind)
-           }
-       }
-       sys.rolesMap[arn] = rolePolicies
-   }
-}

// Prints IAM role ARNs.
func (sys *IAMSys) printIAMRoles() {
    if len(sys.rolesMap) == 0 {

@@ -155,7 +155,6 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -170,7 +169,6 @@ func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string)
func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -185,7 +183,6 @@ func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) [
func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -200,7 +197,6 @@ func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup s
func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -215,7 +211,6 @@ func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []
func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp bool) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -230,7 +225,6 @@ func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp
func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -245,7 +239,6 @@ func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []Notif
func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -260,7 +253,6 @@ func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey
func (sys *NotificationSys) LoadServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr {
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    for idx, client := range sys.peerClients {
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -276,8 +268,6 @@ func (sys *NotificationSys) BackgroundHealStatus(ctx context.Context) ([]madmin.
    ng := WithNPeers(len(sys.peerClients))
    states := make([]madmin.BgHealState, len(sys.peerClients))
    for idx, client := range sys.peerClients {
        idx := idx
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable
@@ -301,7 +291,7 @@ func (sys *NotificationSys) StartProfiling(ctx context.Context, profiler string)
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.StartProfiling(ctx, profiler)
        }, idx, *client.host)

@@ -409,7 +399,7 @@ func (sys *NotificationSys) VerifyBinary(ctx context.Context, u *url.URL, sha256
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.VerifyBinary(ctx, u, sha256Sum, releaseInfo, bytes.NewReader(bin))
        }, idx, *client.host)

@@ -424,7 +414,7 @@ func (sys *NotificationSys) CommitBinary(ctx context.Context) []NotificationPeer
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.CommitBinary(ctx)
        }, idx, *client.host)

@@ -439,7 +429,7 @@ func (sys *NotificationSys) SignalConfigReload(subSys string) []NotificationPeer
        if client == nil {
            continue
        }
        client := client

        ng.Go(GlobalContext, func() error {
            return client.SignalService(serviceReloadDynamic, subSys, false, nil)
        }, idx, *client.host)

@@ -454,7 +444,7 @@ func (sys *NotificationSys) SignalService(sig serviceSignal) []NotificationPeerE
        if client == nil {
            continue
        }
        client := client

        ng.Go(GlobalContext, func() error {
            // force == true preserves the current behavior
            return client.SignalService(sig, "", false, nil)

@@ -470,7 +460,7 @@ func (sys *NotificationSys) SignalServiceV2(sig serviceSignal, dryRun bool, exec
        if client == nil {
            continue
        }
        client := client

        ng.Go(GlobalContext, func() error {
            return client.SignalService(sig, "", dryRun, execAt)
        }, idx, *client.host)
@@ -485,8 +475,6 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
    locksResp := make([]*PeerLocks, len(sys.peerClients))
    g := errgroup.WithNErrs(len(sys.peerClients))
    for index, client := range sys.peerClients {
        index := index
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerNotReachable

@@ -522,7 +510,7 @@ func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName s
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.LoadBucketMetadata(ctx, bucketName)
        }, idx, *client.host)

@@ -552,7 +540,7 @@ func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.DeleteBucketMetadata(ctx, bucketName)
        }, idx, *client.host)

@@ -570,8 +558,6 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    replicationStats := make([]BucketStatsMap, len(sys.peerClients))
    for index, client := range sys.peerClients {
        index := index
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -612,8 +598,6 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    bucketStats := make([]BucketStats, len(sys.peerClients))
    for index, client := range sys.peerClients {
        index := index
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable

@@ -647,8 +631,6 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri
    ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
    siteStats := make([]SRMetricsSummary, len(sys.peerClients))
    for index, client := range sys.peerClients {
        index := index
        client := client
        ng.Go(ctx, func() error {
            if client == nil {
                return errPeerNotReachable
@@ -678,7 +660,7 @@ func (sys *NotificationSys) ReloadPoolMeta(ctx context.Context) {
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.ReloadPoolMeta(ctx)
        }, idx, *client.host)

@@ -699,7 +681,7 @@ func (sys *NotificationSys) DeleteUploadID(ctx context.Context, uploadID string)
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.DeleteUploadID(ctx, uploadID)
        }, idx, *client.host)

@@ -726,7 +708,7 @@ func (sys *NotificationSys) StopRebalance(ctx context.Context) {
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.StopRebalance(ctx)
        }, idx, *client.host)

@@ -752,7 +734,7 @@ func (sys *NotificationSys) LoadRebalanceMeta(ctx context.Context, startRebalanc
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.LoadRebalanceMeta(ctx, startRebalance)
        }, idx, *client.host)

@@ -773,7 +755,7 @@ func (sys *NotificationSys) LoadTransitionTierConfig(ctx context.Context) {
        if client == nil {
            continue
        }
        client := client

        ng.Go(ctx, func() error {
            return client.LoadTransitionTierConfig(ctx)
        }, idx, *client.host)

@@ -795,7 +777,7 @@ func (sys *NotificationSys) GetCPUs(ctx context.Context) []madmin.CPUs {
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetCPUs(ctx)
@@ -820,7 +802,7 @@ func (sys *NotificationSys) GetNetInfo(ctx context.Context) []madmin.NetInfo {
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetNetInfo(ctx)

@@ -845,7 +827,7 @@ func (sys *NotificationSys) GetPartitions(ctx context.Context) []madmin.Partitio
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetPartitions(ctx)

@@ -870,7 +852,7 @@ func (sys *NotificationSys) GetOSInfo(ctx context.Context) []madmin.OSInfo {
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetOSInfo(ctx)

@@ -902,7 +884,6 @@ func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType,
            }
        }

        index := index
        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetMetrics(ctx, t, opts)

@@ -926,7 +907,6 @@ func (sys *NotificationSys) GetResourceMetrics(ctx context.Context) <-chan Metri
    g := errgroup.WithNErrs(len(sys.peerClients))
    peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
    for index := range sys.peerClients {
        index := index
        g.Go(func() error {
            if sys.peerClients[index] == nil {
                return errPeerNotReachable

@@ -949,7 +929,7 @@ func (sys *NotificationSys) GetSysConfig(ctx context.Context) []madmin.SysConfig
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetSysConfig(ctx)

@@ -975,7 +955,7 @@ func (sys *NotificationSys) GetSysServices(ctx context.Context) []madmin.SysServ
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetSELinuxInfo(ctx)
@@ -1009,7 +989,7 @@ func (sys *NotificationSys) GetSysErrors(ctx context.Context) []madmin.SysErrors
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetSysErrors(ctx)

@@ -1034,7 +1014,7 @@ func (sys *NotificationSys) GetMemInfo(ctx context.Context) []madmin.MemInfo {
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetMemInfo(ctx)

@@ -1059,7 +1039,7 @@ func (sys *NotificationSys) GetProcInfo(ctx context.Context) []madmin.ProcInfo {
        if client == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reply[index], err = sys.peerClients[index].GetProcInfo(ctx)

@@ -1214,7 +1194,7 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...
        if sys.peerClients[index] == nil {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            reports[index], err = sys.peerClients[index].MonitorBandwidth(ctx, buckets)

@@ -1302,7 +1282,6 @@ func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan MetricV
    g := errgroup.WithNErrs(len(sys.peerClients))
    peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
    for index := range sys.peerClients {
        index := index
        g.Go(func() error {
            if sys.peerClients[index] == nil {
                return errPeerNotReachable

@@ -1323,7 +1302,6 @@ func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan Metric
    g := errgroup.WithNErrs(len(sys.peerClients))
    peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
    for index := range sys.peerClients {
        index := index
        g.Go(func() error {
            if sys.peerClients[index] == nil {
                return errPeerNotReachable

@@ -1353,7 +1331,7 @@ func (sys *NotificationSys) ServiceFreeze(ctx context.Context, freeze bool) []No
        if client == nil {
            continue
        }
        client := client

        ng.Go(GlobalContext, func() error {
            return client.SignalService(serviceSig, "", false, nil)
        }, idx, *client.host)

@@ -1580,7 +1558,7 @@ func (sys *NotificationSys) GetReplicationMRF(ctx context.Context, bucket, node
        if host != node && node != "all" {
            continue
        }
        index := index

        g.Go(func() error {
            var err error
            peerChannels[index], err = sys.peerClients[index].GetReplicationMRF(ctx, bucket)

@@ -331,7 +331,7 @@ func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Requ
func ifModifiedSince(objTime time.Time, givenTime time.Time) bool {
    // The Date-Modified header truncates sub-second precision, so
    // use mtime < t+1s instead of mtime <= t to check for unmodified.
-   return objTime.After(givenTime.Add(1 * time.Second))
+   return !objTime.Before(givenTime.Add(1 * time.Second))
}

// canonicalizeETag returns ETag with leading and trailing double-quotes removed,

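The `ifModifiedSince` change above accounts for the one-second granularity of HTTP date headers: an object written half a second after the time carried in `If-Modified-Since` should still count as unmodified, so the comparison treats any modification time earlier than the header time plus one second as unmodified. A small standalone sketch of the same comparison (not the MinIO function itself):

```go
package main

import (
    "fmt"
    "net/http"
    "time"
)

// modifiedSince mirrors the comparison in the hunk above: HTTP dates have
// one-second precision, so "unmodified" means mtime < headerTime + 1s.
func modifiedSince(objTime, givenTime time.Time) bool {
    return !objTime.Before(givenTime.Add(1 * time.Second))
}

func main() {
    hdr, _ := time.Parse(http.TimeFormat, "Mon, 02 Jan 2006 15:04:05 GMT")
    obj := hdr.Add(500 * time.Millisecond) // stored half a second later

    // Within the same second as the header: treated as unmodified.
    fmt.Println(modifiedSince(obj, hdr)) // false
    // A write in the next second counts as modified.
    fmt.Println(modifiedSince(hdr.Add(2*time.Second), hdr)) // true
}
```
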
@@ -113,8 +113,6 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin
    g := errgroup.WithNErrs(len(sys.peerClients))

    for idx, client := range sys.peerClients {
        idx := idx
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerOffline

@@ -148,8 +146,6 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin
    g = errgroup.WithNErrs(len(sys.peerClients))
    healBucketResults := make([]madmin.HealResultItem, len(sys.peerClients))
    for idx, client := range sys.peerClients {
        idx := idx
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerOffline

@@ -207,8 +203,6 @@ func (sys *S3PeerSys) ListBuckets(ctx context.Context, opts BucketOptions) ([]Bu
    nodeBuckets := make([][]BucketInfo, len(sys.peerClients))

    for idx, client := range sys.peerClients {
        idx := idx
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerOffline

@@ -295,8 +289,6 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc

    bucketInfos := make([]BucketInfo, len(sys.peerClients))
    for idx, client := range sys.peerClients {
        idx := idx
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerOffline

@@ -401,7 +393,6 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri
func (sys *S3PeerSys) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error {
    g := errgroup.WithNErrs(len(sys.peerClients))
    for idx, client := range sys.peerClients {
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerOffline

@@ -448,7 +439,6 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string,
func (sys *S3PeerSys) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
    g := errgroup.WithNErrs(len(sys.peerClients))
    for idx, client := range sys.peerClients {
        client := client
        g.Go(func() error {
            if client == nil {
                return errPeerOffline

@@ -47,7 +47,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (

    // Make a volume entry on all underlying storage disks.
    for index := range localDrives {
        index := index
        g.Go(func() (serr error) {
            if localDrives[index] == nil {
                beforeState[index] = madmin.DriveStateOffline

@@ -111,7 +110,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (
    if !isMinioMetaBucketName(bucket) && !isAllBucketsNotFound(errs) && opts.Remove {
        g := errgroup.WithNErrs(len(localDrives))
        for index := range localDrives {
            index := index
            g.Go(func() error {
                if localDrives[index] == nil {
                    return errDiskNotFound

@@ -131,7 +129,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (

    // Make a volume entry on all underlying storage disks.
    for index := range localDrives {
        index := index
        g.Go(func() error {
            if beforeState[index] == madmin.DriveStateMissing {
                err := localDrives[index].MakeVol(ctx, bucket)

@@ -225,7 +222,6 @@ func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions)

    // Make a volume entry on all underlying storage disks.
    for index := range localDrives {
        index := index
        g.Go(func() error {
            if localDrives[index] == nil {
                return errDiskNotFound

@@ -273,7 +269,6 @@ func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOpti

    // Make a volume entry on all underlying storage disks.
    for index := range localDrives {
        index := index
        g.Go(func() error {
            if localDrives[index] == nil {
                return errDiskNotFound

@@ -294,7 +289,6 @@ func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions)

    // Make a volume entry on all underlying storage disks.
    for index := range localDrives {
        index := index
        g.Go(func() error {
            if localDrives[index] == nil {
                return errDiskNotFound

@@ -545,6 +545,14 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ
            writeSTSErrorResponse(ctx, w, ErrSTSAccessDenied, err)
            return
        }
        if newGlobalAuthZPluginFn() == nil {
            // if authZ is not set - we expect the policies to be present.
            if globalIAMSys.CurrentPolicies(p) == "" {
                writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue,
                    fmt.Errorf("None of the given policies (`%s`) are defined, credentials will not be generated", p))
                return
            }
        }
    }

    if !globalIAMSys.doesPolicyAllow(p, policy.Args{

@@ -1003,6 +1011,20 @@ func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *h
        return
    }

    _, policyName, err := globalIAMSys.GetRolePolicy(roleArnStr)
    if err != nil {
        writeSTSErrorResponse(ctx, w, ErrSTSAccessDenied, err)
        return
    }

    if newGlobalAuthZPluginFn() == nil { // if authZ is not set - we expect the policyname to be present.
        if globalIAMSys.CurrentPolicies(policyName) == "" {
            writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue,
                fmt.Errorf("None of the given policies (`%s`) are defined, credentials will not be generated", policyName))
            return
        }
    }

    res, err := authn.Authenticate(roleArn, token)
    if err != nil {
        writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err)

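Both STS hunks add the same guard: when no external authorization (authZ) plugin is configured, the handler refuses to issue credentials if none of the policies mapped to the role resolve to a policy that actually exists. A generic sketch of that pattern with made-up helpers (the real code goes through `newGlobalAuthZPluginFn` and `globalIAMSys.CurrentPolicies`):

```go
package main

import (
    "fmt"
    "strings"
)

// knownPolicies stands in for the policies defined on the server.
var knownPolicies = map[string]bool{"readonly": true, "diagnostics": true}

// currentPolicies keeps only the comma-separated names that exist, loosely
// mirroring the CurrentPolicies check used in the hunks above.
func currentPolicies(names string) string {
    var kept []string
    for _, p := range strings.Split(names, ",") {
        if knownPolicies[strings.TrimSpace(p)] {
            kept = append(kept, strings.TrimSpace(p))
        }
    }
    return strings.Join(kept, ",")
}

// checkRolePolicies rejects credential generation when authZ is not delegated
// to a plugin and none of the mapped policies are defined.
func checkRolePolicies(authZPluginConfigured bool, policyName string) error {
    if !authZPluginConfigured && currentPolicies(policyName) == "" {
        return fmt.Errorf("none of the given policies (`%s`) are defined", policyName)
    }
    return nil
}

func main() {
    fmt.Println(checkRolePolicies(false, "no-such-policy")) // error: policy unknown
    fmt.Println(checkRolePolicies(false, "readonly"))       // <nil>
    fmt.Println(checkRolePolicies(true, "no-such-policy"))  // <nil>: plugin decides
}
```
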
@@ -2,7 +2,7 @@ version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
-  image: quay.io/minio/minio:RELEASE.2025-07-18T21-56-31Z
+  image: quay.io/minio/minio:RELEASE.2025-07-23T15-54-02Z
  command: server --console-address ":9001" http://minio{1...4}/data{1...2}
  expose:
    - "9000"