Run modernize (#21546)

Ran `go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...` across the repository.

Ran `go generate ./...` afterwards to keep generated code up to date.
Klaus Post 2025-08-29 04:39:48 +02:00 committed by GitHub
parent 3b7cb6512c
commit f0b91e5504
GPG Key ID: B5690EEEBB952194
238 changed files with 913 additions and 1257 deletions
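A minimal before/after sketch (hypothetical code, not taken from this commit) of two rewrites that recur throughout the diff below: `interface{}` to `any`, and the Go 1.22 range-over-int loop.

```go
package main

import "fmt"

// dump takes any value; modernize rewrites the equivalent
// "v interface{}" parameter to "v any" (the two are identical types).
func dump(v any) {
	fmt.Println(v)
}

func main() {
	// Go 1.22+ range-over-int replaces "for i := 0; i < 3; i++".
	for i := range 3 {
		dump(i)
	}
}
```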

View File

@@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
 }
 }
-func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
+func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
 data, err := io.ReadAll(body)
 if err != nil {
 return SRError{

View File

@@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 // Create a policy policy
 policy := "mypolicy"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 ]
 }
 ]
-}`, bucket))
+}`, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 userCount := 50
 accessKeys := make([]string, userCount)
 secretKeys := make([]string, userCount)
-for i := 0; i < userCount; i++ {
+for i := range userCount {
 accessKey, secretKey := mustGenerateCredentials(c)
 err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
 if err != nil {
@@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 }
 g := errgroup.Group{}
-for i := 0; i < userCount; i++ {
+for i := range userCount {
 g.Go(func(i int) func() error {
 return func() error {
 uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
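The `policyBytes` rewrite above swaps `[]byte(fmt.Sprintf(...))` for `fmt.Appendf(nil, ...)`, which formats straight into a byte slice instead of going through an intermediate string. A minimal sketch with hypothetical values:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	bucket := "testbucket" // hypothetical bucket name

	// Before: format to a string, then copy it into a fresh []byte.
	before := []byte(fmt.Sprintf(`{"Resource": "arn:aws:s3:::%s/*"}`, bucket))

	// After: append the formatted output to a nil slice, allocating once.
	after := fmt.Appendf(nil, `{"Resource": "arn:aws:s3:::%s/*"}`, bucket)

	fmt.Println(bytes.Equal(before, after)) // true
}
```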

View File

@@ -24,6 +24,7 @@ import (
 "errors"
 "fmt"
 "io"
+"maps"
 "net/http"
 "os"
 "slices"
@@ -157,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
 writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 return
 }
-for k, v := range ldapUsers {
-allCredentials[k] = v
-}
+maps.Copy(allCredentials, ldapUsers)
 // Marshal the response
 data, err := json.Marshal(allCredentials)
@@ -2949,7 +2948,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
 name: createReq.Name,
 description: description,
 expiration: createReq.Expiration,
-claims: make(map[string]interface{}),
+claims: make(map[string]any),
 }
 condValues := getConditionValues(r, "", cred)
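The `ListUsers` hunk replaces a manual key/value copy loop with `maps.Copy`; a small sketch of the equivalence (the credential values are made up):

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	all := map[string]string{"user1": "enabled"}
	ldap := map[string]string{"ldapuser": "enabled"} // hypothetical entries

	// Before:
	// for k, v := range ldap {
	//     all[k] = v
	// }

	// After: maps.Copy copies every key/value pair from ldap into all,
	// overwriting existing keys, exactly like the loop it replaces.
	maps.Copy(all, ldap)

	fmt.Println(len(all)) // 2
}
```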

View File

@@ -332,7 +332,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
 // 2.2 create and associate policy to user
 policy := "mypolicy-test-user-update"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -355,7 +355,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -562,7 +562,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 // 1. Create a policy
 policy := "mypolicy"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -585,7 +585,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -680,7 +680,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
 c.Fatalf("bucket creat error: %v", err)
 }
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -703,7 +703,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 // Check that default policies can be overwritten.
 err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -739,7 +739,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 }
 policy := "mypolicy"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -762,7 +762,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -911,7 +911,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
 // Create policy, user and associate policy
 policy := "mypolicy"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -934,7 +934,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -995,7 +995,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
 // Create policy, user and associate policy
 policy := "mypolicy"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -1026,7 +1026,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -1093,7 +1093,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
 // Create policy, user and associate policy
 policy := "mypolicy"
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -1116,7 +1116,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
 ]
 }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 if err != nil {
 c.Fatalf("policy add error: %v", err)
@@ -1367,7 +1367,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
 svcAK, svcSK := mustGenerateCredentials(c)
 // This policy does not allow listing objects.
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -1381,7 +1381,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
 ]
 }
 ]
-}`, bucket))
+}`, bucket)
 cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
 Policy: policyBytes,
 TargetUser: accessKey,
@@ -1558,7 +1558,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
 func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
 c.Helper()
 versions := []string{}
-for i := 0; i < 5; i++ {
+for range 5 {
 ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
 if err != nil {
 c.Fatalf("upload did not succeed got %#v", err)
@@ -1627,7 +1627,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 svcAK, svcSK := mustGenerateCredentials(c)
 // This policy does not allow listing objects.
-policyBytes := []byte(fmt.Sprintf(`{
+policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -1641,7 +1641,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 ]
 }
 ]
-}`, bucket))
+}`, bucket)
 cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
 Policy: policyBytes,
 TargetUser: accessKey,
@@ -1655,7 +1655,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 c.mustNotListObjects(ctx, svcClient, bucket)
 // This policy allows listing objects.
-newPolicyBytes := []byte(fmt.Sprintf(`{
+newPolicyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
 {
@@ -1668,7 +1668,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 ]
 }
 ]
-}`, bucket))
+}`, bucket)
 err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
 NewPolicy: newPolicyBytes,
 })

View File

@@ -954,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
 var args dsync.LockArgs
 var lockers []dsync.NetLocker
-for _, path := range strings.Split(vars["paths"], ",") {
+for path := range strings.SplitSeq(vars["paths"], ",") {
 if path == "" {
 continue
 }
@@ -1193,7 +1193,7 @@ type dummyFileInfo struct {
 mode os.FileMode
 modTime time.Time
 isDir bool
-sys interface{}
+sys any
 }
 func (f dummyFileInfo) Name() string { return f.name }
@@ -1201,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size }
 func (f dummyFileInfo) Mode() os.FileMode { return f.mode }
 func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
 func (f dummyFileInfo) IsDir() bool { return f.isDir }
-func (f dummyFileInfo) Sys() interface{} { return f.sys }
+func (f dummyFileInfo) Sys() any { return f.sys }
 // DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
 // ----------
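The `strings.Split` to `strings.SplitSeq` change in the `ForceUnlockHandler` hunk above (Go 1.24) iterates the pieces lazily instead of building a slice first; a small sketch with a made-up input:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	paths := "bucket/a,,bucket/b" // hypothetical comma-separated input

	// Before: strings.Split allocates a []string up front.
	// for _, path := range strings.Split(paths, ",") { ... }

	// After: strings.SplitSeq yields each piece lazily, no slice allocation.
	for path := range strings.SplitSeq(paths, ",") {
		if path == "" {
			continue // skip empty segments, as the handler does
		}
		fmt.Println(path)
	}
}
```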

View File

@@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
 func TestTopLockEntries(t *testing.T) {
 locksHeld := make(map[string][]lockRequesterInfo)
 var owners []string
-for i := 0; i < 4; i++ {
+for i := range 4 {
 owners = append(owners, fmt.Sprintf("node-%d", i))
 }
@@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
 // request UID, but 10 different resource names associated with it.
 var lris []lockRequesterInfo
 uuid := mustGetUUID()
-for i := 0; i < 10; i++ {
+for i := range 10 {
 resource := fmt.Sprintf("bucket/delete-object-%d", i)
 lri := lockRequesterInfo{
 Name: resource,
@@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
 }
 // Add a few concurrent read locks to the mix
-for i := 0; i < 50; i++ {
+for i := range 50 {
 resource := fmt.Sprintf("bucket/get-object-%d", i)
 lri := lockRequesterInfo{
 Name: resource,

View File

@@ -22,6 +22,7 @@ import (
 "encoding/json"
 "errors"
 "fmt"
+"maps"
 "net/http"
 "sort"
 "sync"
@@ -520,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
 // Make a copy before returning the value
 retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
-for k, v := range h.scannedItemsMap {
-retMap[k] = v
-}
+maps.Copy(retMap, h.scannedItemsMap)
 return retMap
 }
@@ -534,9 +533,7 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
 // Make a copy before returning the value
 retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
-for k, v := range h.healedItemsMap {
-retMap[k] = v
-}
+maps.Copy(retMap, h.healedItemsMap)
 return retMap
 }
@@ -549,9 +546,7 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
 // Make a copy before returning the value
 retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
-for k, v := range h.healFailedItemsMap {
-retMap[k] = v
-}
+maps.Copy(retMap, h.healFailedItemsMap)
 return retMap
 }

View File

@@ -65,7 +65,7 @@ func setCommonHeaders(w http.ResponseWriter) {
 }
 // Encodes the response headers into XML format.
-func encodeResponse(response interface{}) []byte {
+func encodeResponse(response any) []byte {
 var buf bytes.Buffer
 buf.WriteString(xml.Header)
 if err := xml.NewEncoder(&buf).Encode(response); err != nil {
@@ -83,7 +83,7 @@ func encodeResponse(response interface{}) []byte {
 // Do not use this function for anything other than ListObjects()
 // variants, please open a github discussion if you wish to use
 // this in other places.
-func encodeResponseList(response interface{}) []byte {
+func encodeResponseList(response any) []byte {
 var buf bytes.Buffer
 buf.WriteString(xxml.Header)
 if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
@@ -94,7 +94,7 @@ func encodeResponseList(response interface{}) []byte {
 }
 // Encodes the response headers into JSON format.
-func encodeResponseJSON(response interface{}) []byte {
+func encodeResponseJSON(response any) []byte {
 var bytesBuffer bytes.Buffer
 e := json.NewEncoder(&bytesBuffer)
 e.Encode(response)

View File

@@ -100,7 +100,6 @@ func TestObjectLocation(t *testing.T) {
 },
 }
 for _, testCase := range testCases {
-testCase := testCase
 t.Run("", func(t *testing.T) {
 gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
 if testCase.expectedLocation != gotLocation {
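Dropping `testCase := testCase` is safe because, since Go 1.22, each loop iteration gets its own copy of the loop variable, so closures and subtests no longer observe a shared variable. A tiny illustrative sketch:

```go
package main

import "fmt"

func main() {
	var prints []func()
	for _, v := range []int{1, 2, 3} {
		// Pre-1.22 this needed "v := v"; with Go 1.22+ each iteration
		// already has its own v, so the closure captures the right value.
		prints = append(prints, func() { fmt.Println(v) })
	}
	for _, p := range prints {
		p() // prints 1, 2, 3 (not 3, 3, 3)
	}
}
```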

View File

@@ -216,7 +216,7 @@ func getSessionToken(r *http.Request) (token string) {
 // Fetch claims in the security token returned by the client, doesn't return
 // errors - upon errors the returned claims map will be empty.
-func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
+func mustGetClaimsFromToken(r *http.Request) map[string]any {
 claims, _ := getClaimsFromToken(getSessionToken(r))
 return claims
 }
@@ -266,7 +266,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error)
 }
 // Fetch claims in the security token returned by the client.
-func getClaimsFromToken(token string) (map[string]interface{}, error) {
+func getClaimsFromToken(token string) (map[string]any, error) {
 jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
 if err != nil {
 return nil, err
@@ -275,7 +275,7 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
 }
 // Fetch claims in the security token returned by the client and validate the token.
-func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
+func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
 token := getSessionToken(r)
 if token != "" && cred.AccessKey == "" {
 // x-amz-security-token is not allowed for anonymous access.

View File

@@ -24,6 +24,7 @@ import (
 "fmt"
 "io"
 "os"
+"slices"
 "sort"
 "strings"
 "sync"
@@ -269,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
 func (h *healingTracker) isHealed(bucket string) bool {
 h.mu.RLock()
 defer h.mu.RUnlock()
-for _, v := range h.HealedBuckets {
-if v == bucket {
-return true
-}
-}
-return false
+return slices.Contains(h.HealedBuckets, bucket)
 }
 // resume will reset progress to the numbers at the start of the bucket.
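The `isHealed` rewrite above collapses a linear search loop into `slices.Contains`; a minimal sketch with hypothetical data:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	healedBuckets := []string{"bucket-a", "bucket-b"} // hypothetical state

	// Before:
	// for _, v := range healedBuckets {
	//     if v == "bucket-b" {
	//         return true
	//     }
	// }
	// return false

	// After: slices.Contains performs the same linear scan.
	fmt.Println(slices.Contains(healedBuckets, "bucket-b")) // true
	fmt.Println(slices.Contains(healedBuckets, "bucket-c")) // false
}
```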

View File

@@ -25,6 +25,7 @@ import (
 "errors"
 "fmt"
 "io"
+"maps"
 "math/rand"
 "net/http"
 "net/url"
@@ -574,9 +575,7 @@ func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
 oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
 }
-for k, v := range objInfo.UserMetadata {
-oi.UserDefined[k] = v
-}
+maps.Copy(oi.UserDefined, objInfo.UserMetadata)
 return oi
 }

View File

@@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
 type BatchJobSize int64
 // UnmarshalYAML to parse humanized byte values
-func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
 var batchExpireSz string
 err := unmarshal(&batchExpireSz)
 if err != nil {

View File

@@ -21,6 +21,7 @@ import (
 "context"
 "encoding/base64"
 "fmt"
+"maps"
 "math/rand"
 "net/http"
 "runtime"
@@ -110,9 +111,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
 }
 }
 e.kmsContext = kms.Context{}
-for k, v := range ctx {
-e.kmsContext[k] = v
-}
+maps.Copy(e.kmsContext, ctx)
 ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
 if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
 return err
@@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
 // Since we are rotating the keys, make sure to update the metadata.
 oi.metadataOnly = true
 oi.keyRotation = true
-for k, v := range encMetadata {
-oi.UserDefined[k] = v
-}
+maps.Copy(oi.UserDefined, encMetadata)
 if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
 VersionID: oi.VersionID,
 }, ObjectOptions{

View File

@@ -51,8 +51,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 // benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 b.ReportAllocs()
 // the actual benchmark for PutObject starts here. Reset the benchmark timer.
-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for i := 0; b.Loop(); i++ {
 // insert the object.
 objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
 mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
@@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 // benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 b.ReportAllocs()
 // the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
-b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for i := 0; b.Loop(); i++ {
 // insert the object.
 totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
-for j := 0; j < totalPartsNR; j++ {
+for j := range totalPartsNR {
 if j < totalPartsNR-1 {
 textPartData = textData[j*partSize : (j+1)*partSize-1]
 } else {
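Replacing `b.ResetTimer()` plus `for i := 0; i < b.N; i++` with `for i := 0; b.Loop(); i++` uses the Go 1.24 benchmark loop, which handles timer management and iteration counting itself. A hedged sketch of the shape (the measured work is a stand-in):

```go
package bench_test

import (
	"strconv"
	"testing"
)

// BenchmarkLoop is a hypothetical benchmark showing the b.Loop pattern.
func BenchmarkLoop(b *testing.B) {
	b.ReportAllocs()
	// b.Loop starts the timer before the first iteration and stops it after
	// the last, so an explicit b.ResetTimer() is no longer needed.
	for i := 0; b.Loop(); i++ {
		_ = strconv.Itoa(i) // stand-in for the measured work
	}
}
```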

View File

@@ -154,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
 g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
 for index := range bucketsToBeUpdatedSlice {
-index := index
 g.Go(func() error {
 return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
 }, index)
@@ -1387,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 // Set the correct hex md5sum for the fan-out stream.
 fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))
-concurrentSize := 100
-if runtime.GOMAXPROCS(0) < concurrentSize {
-concurrentSize = runtime.GOMAXPROCS(0)
-}
+concurrentSize := min(runtime.GOMAXPROCS(0), 100)
 fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
 eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
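The `concurrentSize` change folds a clamp-style `if` into the `min` builtin (Go 1.21); a small sketch:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Before:
	// concurrentSize := 100
	// if runtime.GOMAXPROCS(0) < concurrentSize {
	//     concurrentSize = runtime.GOMAXPROCS(0)
	// }

	// After: the min builtin expresses the upper cap directly.
	concurrentSize := min(runtime.GOMAXPROCS(0), 100)
	fmt.Println(concurrentSize)
}
```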

View File

@@ -657,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 sha256sum := ""
 var objectNames []string
-for i := 0; i < 10; i++ {
+for i := range 10 {
 contentBytes := []byte("hello")
 objectName := "test-object-" + strconv.Itoa(i)
 if i == 0 {
@@ -687,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 // The following block will create a bucket policy with delete object to 'public/*'. This is
 // to test a mixed response of a successful & failure while deleting objects in a single request
-policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
+policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
 rec := httptest.NewRecorder()
 req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
 credentials.AccessKey, credentials.SecretKey, nil)

View File

@@ -23,6 +23,7 @@ import (
 "errors"
 "fmt"
 "io"
+"maps"
 "net/http"
 "strconv"
 "strings"
@@ -959,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
 UserDefined: meta,
 }
 }
-for k, v := range objInfo.UserDefined {
-meta[k] = v
-}
+maps.Copy(meta, objInfo.UserDefined)
 if len(objInfo.UserTags) != 0 {
 meta[xhttp.AmzObjectTagging] = objInfo.UserTags
 }

View File

@@ -472,7 +472,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
 return meta, reloaded, nil
 }
-val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
+val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
 meta, err = loadBucketMetadata(ctx, objAPI, bucket)
 if err != nil {
 if !sys.Initialized() {
@@ -511,7 +511,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
 g := errgroup.WithNErrs(len(buckets))
 bucketMetas := make([]BucketMetadata, len(buckets))
 for index := range buckets {
-index := index
 g.Go(func() error {
 // Sleep and stagger to avoid blocked CPU and thundering
 // herd upon start up sequence.

View File

@@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
 var wg sync.WaitGroup
 var mu sync.Mutex
 wg.Add(n)
-for i := 0; i < n; i++ {
+for range n {
 go func() {
 defer wg.Done()
 // Sync start.
@@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // Test case - 1.
 {
 bucketName: bucketName,
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 accessKey: credentials.AccessKey,
@@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // Expecting StatusBadRequest (400).
 {
 bucketName: bucketName,
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 policyLen: maxBucketPolicySize + 1,
 accessKey: credentials.AccessKey,
@@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // Expecting the HTTP response status to be StatusLengthRequired (411).
 {
 bucketName: bucketName,
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 policyLen: 0,
 accessKey: credentials.AccessKey,
@@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // checkBucketPolicyResources should fail.
 {
 bucketName: bucketName1,
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 accessKey: credentials.AccessKey,
@@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // should result in 404 StatusNotFound
 {
 bucketName: "non-existent-bucket",
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")),
 policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 accessKey: credentials.AccessKey,
@@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // should result in 404 StatusNotFound
 {
 bucketName: ".invalid-bucket",
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")),
 policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 accessKey: credentials.AccessKey,
@@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 // should result in 400 StatusBadRequest.
 {
 bucketName: bucketName,
-bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
+bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
 policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
 accessKey: credentials.AccessKey,

View File

@@ -19,6 +19,7 @@ package cmd
 import (
 "encoding/json"
+"maps"
 "net/http"
 "net/url"
 "strconv"
@@ -187,9 +188,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
 }
 cloneURLValues := make(url.Values, len(r.Form))
-for k, v := range r.Form {
-cloneURLValues[k] = v
-}
+maps.Copy(cloneURLValues, r.Form)
 for _, objLock := range []string{
 xhttp.AmzObjectLockMode,
@@ -224,7 +223,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
 // Add groups claim which could be a list. This will ensure that the claim
 // `jwt:groups` works.
 if grpsVal, ok := claims["groups"]; ok {
-if grpsIs, ok := grpsVal.([]interface{}); ok {
+if grpsIs, ok := grpsVal.([]any); ok {
 grps := []string{}
 for _, gI := range grpsIs {
 if g, ok := gI.(string); ok {

View File

@@ -21,6 +21,7 @@ import (
 "bytes"
 "context"
 "fmt"
+"maps"
 "net/http"
 "net/url"
 "regexp"
@@ -311,7 +312,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
 if len(s) == 0 {
 return
 }
-for _, p := range strings.Split(s, ",") {
+for p := range strings.SplitSeq(s, ",") {
 if p == "" {
 continue
 }
@@ -735,9 +736,7 @@ type BucketReplicationResyncStatus struct {
 func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
 m = make(map[string]TargetReplicationResyncStatus)
-for arn, st := range rs.TargetsMap {
-m[arn] = st
-}
+maps.Copy(m, rs.TargetsMap)
 return
 }

View File

@@ -24,6 +24,7 @@ import (
 "errors"
 "fmt"
 "io"
+"maps"
 "math/rand"
 "net/http"
 "net/url"
@@ -803,9 +804,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
 } else {
 cs, mp := getCRCMeta(objInfo, 0, nil)
 // Set object checksum.
-for k, v := range cs {
-meta[k] = v
-}
+maps.Copy(meta, cs)
 isMP = mp
 if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
 // For objects where checksum is full object, it will be the same.
@@ -969,9 +968,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
 t, _ := tags.ParseObjectTags(oi1.UserTags)
 oi2Map := make(map[string]string)
-for k, v := range oi2.UserTags {
-oi2Map[k] = v
-}
+maps.Copy(oi2Map, oi2.UserTags)
 if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
 return replicateMetadata
 }
@@ -1770,9 +1767,7 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
 }
 if !copied {
 dst = make(map[string]string, len(metadata))
-for k, v := range metadata {
-dst[k] = v
-}
+maps.Copy(dst, metadata)
 copied = true
 }
 delete(dst, key)
@@ -2954,7 +2949,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 }()
 var wg sync.WaitGroup
-for i := 0; i < resyncParallelRoutines; i++ {
+for i := range resyncParallelRoutines {
 wg.Add(1)
 workers[i] = make(chan ReplicateObjectInfo, 100)
 i := i
@@ -3063,7 +3058,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 workers[h%uint64(resyncParallelRoutines)] <- roi
 }
 }
-for i := 0; i < resyncParallelRoutines; i++ {
+for i := range resyncParallelRoutines {
 xioutil.SafeClose(workers[i])
 }
 wg.Wait()
@@ -3193,11 +3188,9 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []stri
 <-ctx.Done()
 return
 }
-duration := time.Duration(r.Float64() * float64(time.Minute))
-if duration < time.Second {
-// Make sure to sleep at least a second to avoid high CPU ticks.
-duration = time.Second
-}
+duration := max(time.Duration(r.Float64()*float64(time.Minute)),
+// Make sure to sleep at least a second to avoid high CPU ticks.
+time.Second)
 time.Sleep(duration)
 }
 }

View File

@@ -19,6 +19,7 @@ package cmd
 import (
 "fmt"
+"maps"
 "math"
 "sync/atomic"
 "time"
@@ -221,9 +222,7 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
 }
 if s.Failed.ErrCounts == nil {
 s.Failed.ErrCounts = make(map[string]int)
-for k, v := range st.Failed.ErrCounts {
-s.Failed.ErrCounts[k] = v
-}
+maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts)
 }
 c.Stats[arn] = &s
 }

View File

@@ -20,6 +20,7 @@ package cmd
 import (
 "context"
 "errors"
+"maps"
 "net/url"
 "sync"
 "time"
@@ -236,9 +237,7 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth {
 sys.hMutex.RLock()
 defer sys.hMutex.RUnlock()
 m := make(map[string]epHealth, len(sys.hc))
-for k, v := range sys.hc {
-m[k] = v
-}
+maps.Copy(m, sys.hc)
 return m
 }

View File

@@ -57,11 +57,9 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
 // callhome running on a different node.
 // sleep for some time and try again.
-duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
-if duration < time.Second {
-// Make sure to sleep at least a second to avoid high CPU ticks.
-duration = time.Second
-}
+duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())),
+// Make sure to sleep at least a second to avoid high CPU ticks.
+time.Second)
 time.Sleep(duration)
 }
 }()
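The callhome sleep now uses the `max` builtin (Go 1.21) to enforce the one-second floor that the removed `if` provided; a sketch with a made-up jitter source:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	r := rand.New(rand.NewSource(1)) // hypothetical jitter source

	// max keeps the randomized sleep from dropping below one second,
	// matching the removed "if duration < time.Second" guard.
	duration := max(time.Duration(r.Float64()*float64(time.Minute)), time.Second)
	fmt.Println(duration >= time.Second) // true
}
```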

View File

@@ -105,7 +105,7 @@ func init() {
 gob.Register(madmin.TimeInfo{})
 gob.Register(madmin.XFSErrorConfigs{})
 gob.Register(map[string]string{})
-gob.Register(map[string]interface{}{})
+gob.Register(map[string]any{})
 // All minio-go and madmin-go API operations shall be performed only once,
 // another way to look at this is we are turning off retries.
@@ -258,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) {
 if !serverDebugLog {
 // Disable console logging if server debug log is not enabled
-noLog := func(string, ...interface{}) {}
+noLog := func(string, ...any) {}
 consoleapi.LogInfo = noLog
 consoleapi.LogError = noLog
@@ -761,7 +761,7 @@ func serverHandleEnvVars() {
 domains := env.Get(config.EnvDomain, "")
 if len(domains) != 0 {
-for _, domainName := range strings.Split(domains, config.ValueSeparator) {
+for domainName := range strings.SplitSeq(domains, config.ValueSeparator) {
 if _, ok := dns2.IsDomainName(domainName); !ok {
 logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
 "Invalid MINIO_DOMAIN value in environment variable")
@@ -1059,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
 return time.Time{}, false
 }
-func (a bgCtx) Value(key interface{}) interface{} {
+func (a bgCtx) Value(key any) any {
 return a.parent.Value(key)
 }

View File

@@ -43,7 +43,6 @@ func Test_readFromSecret(t *testing.T) {
 }
 for _, testCase := range testCases {
-testCase := testCase
 t.Run("", func(t *testing.T) {
 tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 if err != nil {
@@ -155,7 +154,6 @@ MINIO_ROOT_PASSWORD=minio123`,
 },
 }
 for _, testCase := range testCases {
-testCase := testCase
 t.Run("", func(t *testing.T) {
 tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 if err != nil {

View File

@@ -21,6 +21,7 @@ import (
 "context"
 "errors"
 "fmt"
+"maps"
 "strings"
 "sync"
@@ -78,12 +79,8 @@ func initHelp() {
 config.BatchSubSys: batch.DefaultKVS,
 config.BrowserSubSys: browser.DefaultKVS,
 }
-for k, v := range notify.DefaultNotificationKVS {
-kvs[k] = v
-}
-for k, v := range lambda.DefaultLambdaKVS {
-kvs[k] = v
-}
+maps.Copy(kvs, notify.DefaultNotificationKVS)
+maps.Copy(kvs, lambda.DefaultLambdaKVS)
 if globalIsErasure {
 kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
 kvs[config.HealSubSys] = heal.DefaultKVS

View File

@@ -38,12 +38,12 @@ import (
 )
 // Save config file to corresponding backend
-func Save(configFile string, data interface{}) error {
+func Save(configFile string, data any) error {
 return quick.SaveConfig(data, configFile, globalEtcdClient)
 }
 // Load config from backend
-func Load(configFile string, data interface{}) (quick.Config, error) {
+func Load(configFile string, data any) (quick.Config, error) {
 return quick.LoadConfig(configFile, globalEtcdClient, data)
 }

View File

@@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
 return saveConfig(ctx, objAPI, historyFile, kv)
 }
-func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
+func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error {
 data, err := json.Marshal(cfg)
 if err != nil {
 return err

View File

@@ -101,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
 lastN = make([]log.Info, last)
 sys.RLock()
-sys.logBuf.Do(func(p interface{}) {
+sys.logBuf.Do(func(p any) {
 if p != nil {
 lg, ok := p.(log.Info)
 if ok && lg.SendLog(node, logKind) {
@@ -155,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats {
 // Content returns the console stdout log
 func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
 sys.RLock()
-sys.logBuf.Do(func(p interface{}) {
+sys.logBuf.Do(func(p any) {
 if p != nil {
 lg, ok := p.(log.Info)
 if ok {
@@ -181,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType {
 // Send log message 'e' to console and publish to console
 // log pubsub system
-func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error {
+func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error {
 var lg log.Info
 switch e := entry.(type) {
 case log.Entry:

View File

@@ -198,7 +198,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p
 func (p *scannerMetrics) getCurrentPaths() []string {
 var res []string
 prefix := globalLocalNodeName + "/"
-p.currentPaths.Range(func(key, value interface{}) bool {
+p.currentPaths.Range(func(key, value any) bool {
 // We are a bit paranoid, but better miss an entry than crash.
 name, ok := key.(string)
 if !ok {
@@ -221,7 +221,7 @@ func (p *scannerMetrics) getCurrentPaths() []string {
 // (since this is concurrent it may not be 100% reliable)
 func (p *scannerMetrics) activeDrives() int {
 var i int
-p.currentPaths.Range(func(k, v interface{}) bool {
+p.currentPaths.Range(func(k, v any) bool {
 i++
 return true
 })
@@ -299,7 +299,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
 m.CollectedAt = time.Now()
 m.ActivePaths = p.getCurrentPaths()
 m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
-for i := scannerMetric(0); i < scannerMetricLast; i++ {
+for i := range scannerMetricLast {
 if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
 m.LifeTimeOps[i.String()] = n
 }
@@ -309,7 +309,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
 }
 m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
-for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
+for i := range scannerMetricLastRealtime {
 lm := p.lastMinute(i)
 if lm.N > 0 {
 m.LastMinute.Actions[i.String()] = lm.asTimedAction()
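`for i := range scannerMetricLast` works because Go 1.22 range-over-int accepts any integer type and yields values of that same type; a tiny sketch with a hypothetical metric type standing in for `scannerMetric`:

```go
package main

import "fmt"

// metric is a hypothetical stand-in for an enum-like integer type
// such as scannerMetric.
type metric uint8

const metricLast metric = 3

func main() {
	// Ranging over a typed integer yields 0, 1, 2 as values of type metric,
	// replacing "for i := metric(0); i < metricLast; i++".
	for i := range metricLast {
		fmt.Printf("%T %d\n", i, i)
	}
}
```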

View File

@@ -78,11 +78,9 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
 // Run the data scanner in a loop
 for {
 runDataScanner(ctx, objAPI)
-duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
-if duration < time.Second {
-// Make sure to sleep at least a second to avoid high CPU ticks.
-duration = time.Second
-}
+duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())),
+// Make sure to sleep at least a second to avoid high CPU ticks.
+time.Second)
 time.Sleep(duration)
 }
 }()

View File

@@ -127,7 +127,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
 v2 uuid-2 modTime -3m
 v1 uuid-1 modTime -4m
 */
-for i := 0; i < 5; i++ {
+for i := range 5 {
 fivs[i] = FileInfo{
 Volume: bucket,
 Name: obj,

View File

@@ -22,6 +22,7 @@ import (
 "errors"
 "fmt"
 "io"
+"maps"
 "math/rand"
 "net/http"
 "path"
@@ -99,9 +100,7 @@ func (ats *allTierStats) clone() *allTierStats {
 }
 dst := *ats
 dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
-for tier, st := range ats.Tiers {
-dst.Tiers[tier] = st
-}
+maps.Copy(dst.Tiers, ats.Tiers)
 return &dst
 }
@@ -347,9 +346,7 @@ func (e dataUsageEntry) clone() dataUsageEntry {
 // We operate on a copy from the receiver.
 if e.Children != nil {
 ch := make(dataUsageHashMap, len(e.Children))
-for k, v := range e.Children {
-ch[k] = v
-}
+maps.Copy(ch, e.Children)
 e.Children = ch
 }

View File

@@ -179,7 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
 t.Fatal(err)
 }
 // Changed dir must be picked up in this many cycles.
-for i := 0; i < dataUsageUpdateDirCycles; i++ {
+for range dataUsageUpdateDirCycles {
 got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
 got.Info.NextCycle++
 if err != nil {
@@ -428,7 +428,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 t.Fatal(err)
 }
 // Changed dir must be picked up in this many cycles.
-for i := 0; i < dataUsageUpdateDirCycles; i++ {
+for range dataUsageUpdateDirCycles {
 got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
 got.Info.NextCycle++
 if err != nil {
@@ -526,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
 // generateUsageTestFiles create nFolders * nFiles files of size bytes each.
 func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
 pl := make([]byte, size)
-for i := 0; i < nFolders; i++ {
+for i := range nFolders {
 name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
 err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
 if err != nil {
 t.Fatal(err)
 }
-for j := 0; j < nFiles; j++ {
+for j := range nFiles {
 name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
 err = os.WriteFile(name, pl, os.ModePerm)
 if err != nil {
@@ -618,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 }
 // equalAsJSON returns whether the values are equal when encoded as JSON.
-func equalAsJSON(a, b interface{}) bool {
+func equalAsJSON(a, b any) bool {
 aj, err := json.Marshal(a)
 if err != nil {
 panic(err)

View File

@@ -129,12 +129,9 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
 if failPct > dynamicTimeoutIncreaseThresholdPct {
 // We are hitting the timeout too often, so increase the timeout by 25%
-timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
-// Set upper cap.
-if timeout > int64(maxDynamicTimeout) {
-timeout = int64(maxDynamicTimeout)
-}
+timeout := min(
+// Set upper cap.
+atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout))
 // Safety, shouldn't happen
 if timeout < dt.minimum {
 timeout = dt.minimum

View File

@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogFailure() timeout.LogFailure()
} }
@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogFailure() timeout.LogFailure()
} }
adjusted := timeout.Timeout() adjusted := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogFailure() timeout.LogFailure()
} }
@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogSuccess(20 * time.Second) timeout.LogSuccess(20 * time.Second)
} }
@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogSuccess(20 * time.Second) timeout.LogSuccess(20 * time.Second)
} }
adjusted := timeout.Timeout() adjusted := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogSuccess(20 * time.Second) timeout.LogSuccess(20 * time.Second)
} }
@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
const successTimeout = 20 * time.Second const successTimeout = 20 * time.Second
for l := 0; l < 100; l++ { for range 100 {
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogSuccess(successTimeout) timeout.LogSuccess(successTimeout)
} }
} }
@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
rng := rand.New(rand.NewSource(int64(i))) rng := rand.New(rand.NewSource(int64(i)))
go func() { go func() {
defer wg.Done() defer wg.Done()
for i := 0; i < 100; i++ { for range 100 {
for j := 0; j < 100; j++ { for range 100 {
timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64())) timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
} }
to := timeout.Timeout() to := timeout.Timeout()
@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
const successTimeout = 20 * time.Second const successTimeout = 20 * time.Second
for l := 0; l < 100; l++ { for range 100 {
for i := 0; i < dynamicTimeoutLogSize; i++ { for range dynamicTimeoutLogSize {
timeout.LogSuccess(successTimeout) timeout.LogSuccess(successTimeout)
} }
} }
@ -166,13 +166,9 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) { func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
const successTimeout = 20 * time.Second const successTimeout = 20 * time.Second
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
		rnd := f()
-		duration := time.Duration(float64(successTimeout) * rnd)
-		if duration < 100*time.Millisecond {
-			duration = 100 * time.Millisecond
-		}
+		duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond)
if duration >= time.Minute { if duration >= time.Minute {
timeout.LogFailure() timeout.LogFailure()
} else { } else {
@ -188,7 +184,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
for try := 0; try < 10; try++ { for range 10 {
testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64) testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
} }
@ -205,7 +201,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
initial := timeout.Timeout() initial := timeout.Timeout()
for try := 0; try < 10; try++ { for range 10 {
testDynamicTimeoutAdjust(t, timeout, func() float64 { testDynamicTimeoutAdjust(t, timeout, func() float64 {
return 1.0 + rand.NormFloat64() return 1.0 + rand.NormFloat64()
}) })
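For reference, outside this patch: the test loops above drop the unused index because Go 1.22 allows ranging directly over an integer. A minimal sketch:

package main

import "fmt"

func main() {
	n := 3
	for range n { // Go 1.22+: run the body n times, no index variable
		fmt.Println("tick")
	}
	for i := range n { // index form, when i is actually used
		fmt.Println(i)
	}
}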


@ -29,6 +29,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"maps"
"net/http" "net/http"
"path" "path"
"strconv" "strconv"
@ -117,10 +118,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
names = make([]string, 0, BatchSize) names = make([]string, 0, BatchSize)
) )
for len(objects) > 0 { for len(objects) > 0 {
-		N := BatchSize
-		if len(objects) < BatchSize {
-			N = len(objects)
-		}
+		N := min(len(objects), BatchSize)
batch := objects[:N] batch := objects[:N]
// We have to decrypt only ETags of SSE-S3 single-part // We have to decrypt only ETags of SSE-S3 single-part
@ -317,9 +315,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
// of the client provided context and add the bucket // of the client provided context and add the bucket
// key, if not present. // key, if not present.
kmsCtx := kms.Context{} kmsCtx := kms.Context{}
-	for k, v := range cryptoCtx {
-		kmsCtx[k] = v
-	}
+	maps.Copy(kmsCtx, cryptoCtx)
if _, ok := kmsCtx[bucket]; !ok { if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object) kmsCtx[bucket] = path.Join(bucket, object)
} }
@ -389,9 +385,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
// of the client provided context and add the bucket // of the client provided context and add the bucket
// key, if not present. // key, if not present.
kmsCtx := kms.Context{} kmsCtx := kms.Context{}
-	for k, v := range cryptoCtx {
-		kmsCtx[k] = v
-	}
+	maps.Copy(kmsCtx, cryptoCtx)
if _, ok := kmsCtx[bucket]; !ok { if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object) kmsCtx[bucket] = path.Join(bucket, object)
} }
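For reference, outside this patch: the key-by-key copy loops above collapse into maps.Copy from the standard maps package (Go 1.21), which inserts every key/value of the source into the destination. A minimal sketch with illustrative maps:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"bucket": "photos"}
	src := map[string]string{"owner": "minio", "bucket": "photos-v2"}
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }
	fmt.Println(dst)    // keys already in dst are overwritten by src
}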


@ -384,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) {
// Simple useful utilities // Simple useful utilities
repeat = func(k int64, n int) []int64 { repeat = func(k int64, n int) []int64 {
a := []int64{} a := []int64{}
for i := 0; i < n; i++ { for range n {
a = append(a, k) a = append(a, k)
} }
return a return a
@ -471,10 +471,7 @@ func TestGetDecryptedRange(t *testing.T) {
// round up the lbPartOffset // round up the lbPartOffset
// to the end of the // to the end of the
// corresponding DARE package // corresponding DARE package
-			lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
-			if lbPkgEndOffset > v {
-				lbPkgEndOffset = v
-			}
+			lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v)
bytesToDrop := v - lbPkgEndOffset bytesToDrop := v - lbPkgEndOffset
// Last segment to update `l` // Last segment to update `l`


@ -22,7 +22,7 @@ import (
"fmt" "fmt"
"net/url" "net/url"
"runtime" "runtime"
"sort" "slices"
"strings" "strings"
"github.com/cespare/xxhash/v2" "github.com/cespare/xxhash/v2"
@ -122,9 +122,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
// eyes that we prefer a sorted setCount slice for the // eyes that we prefer a sorted setCount slice for the
// subsequent function to figure out the right common // subsequent function to figure out the right common
// divisor, it avoids loops. // divisor, it avoids loops.
-	sort.Slice(setCounts, func(i, j int) bool {
-		return setCounts[i] < setCounts[j]
-	})
+	slices.Sort(setCounts)
return setCounts return setCounts
} }
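For reference, outside this patch: sort.Slice with a plain less-than comparison and hand-written membership loops map onto slices.Sort and slices.Contains from the standard slices package (Go 1.21). A minimal sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	setCounts := []uint64{8, 2, 4}
	slices.Sort(setCounts)                     // replaces sort.Slice with an ordered < comparison
	fmt.Println(setCounts)                     // [2 4 8]
	fmt.Println(slices.Contains(setCounts, 4)) // replaces a hand-written for/if/return loop
}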


@ -55,7 +55,6 @@ func TestCreateServerEndpoints(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
srvCtxt := serverCtxt{} srvCtxt := serverCtxt{}
err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt) err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
@ -85,7 +84,6 @@ func TestGetDivisibleSize(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
gotGCD := getDivisibleSize(testCase.totalSizes) gotGCD := getDivisibleSize(testCase.totalSizes)
if testCase.result != gotGCD { if testCase.result != gotGCD {
@ -172,7 +170,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
argPatterns := make([]ellipses.ArgPattern, len(testCase.args)) argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args { for i, arg := range testCase.args {
@ -294,7 +291,6 @@ func TestGetSetIndexes(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
argPatterns := make([]ellipses.ArgPattern, len(testCase.args)) argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args { for i, arg := range testCase.args {
@ -637,7 +633,6 @@ func TestParseEndpointSet(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
gotEs, err := parseEndpointSet(0, testCase.arg) gotEs, err := parseEndpointSet(0, testCase.arg)
if err != nil && testCase.success { if err != nil && testCase.success {


@ -312,7 +312,6 @@ func TestCreateEndpoints(t *testing.T) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
i := i
testCase := testCase testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
var srvCtxt serverCtxt var srvCtxt serverCtxt


@ -136,10 +136,7 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
shardSize := e.ShardSize() shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength) shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + length) / e.blockSize endShard := (startOffset + length) / e.blockSize
-	tillOffset := endShard*shardSize + shardSize
-	if tillOffset > shardFileSize {
-		tillOffset = shardFileSize
-	}
+	tillOffset := min(endShard*shardSize+shardSize, shardFileSize)
return tillOffset return tillOffset
} }


@ -30,7 +30,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
var mu sync.Mutex var mu sync.Mutex
r := rand.New(rand.NewSource(time.Now().UnixNano())) r := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, i := range r.Perm(len(disks)) { for _, i := range r.Perm(len(disks)) {
i := i
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()


@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
// Verify erasure.Decode() for random offsets and lengths. // Verify erasure.Decode() for random offsets and lengths.
for i := 0; i < iterations; i++ { for range iterations {
offset := r.Int63n(length) offset := r.Int63n(length)
readLen := r.Int63n(length - offset) readLen := r.Int63n(length - offset)
@ -308,17 +308,16 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
b.Fatalf("failed to create erasure test file: %v", err) b.Fatalf("failed to create erasure test file: %v", err)
} }
for i := 0; i < dataDown; i++ { for i := range dataDown {
writers[i] = nil writers[i] = nil
} }
for i := data; i < data+parityDown; i++ { for i := data; i < data+parityDown; i++ {
writers[i] = nil writers[i] = nil
} }
b.ResetTimer()
b.SetBytes(size) b.SetBytes(size)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
bitrotReaders := make([]io.ReaderAt, len(disks)) bitrotReaders := make([]io.ReaderAt, len(disks))
for index, disk := range disks { for index, disk := range disks {
if writers[index] == nil { if writers[index] == nil {
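For reference, outside this patch: the benchmark bodies above use testing.B.Loop (Go 1.24), which replaces the b.ResetTimer plus for i := 0; i < b.N; i++ pair and keeps setup out of the measured region. A minimal sketch:

package bench_test

import "testing"

func BenchmarkCopy(b *testing.B) {
	data := []byte("setup runs once, outside the measured region")
	b.ReportAllocs()
	for b.Loop() { // Go 1.24+: replaces an explicit ResetTimer and the b.N counting loop
		_ = append([]byte(nil), data...)
	}
}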


@ -172,17 +172,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
buffer := make([]byte, blockSizeV2, 2*blockSizeV2) buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
content := make([]byte, size) content := make([]byte, size)
for i := 0; i < dataDown; i++ { for i := range dataDown {
disks[i] = OfflineDisk disks[i] = OfflineDisk
} }
for i := data; i < data+parityDown; i++ { for i := data; i < data+parityDown; i++ {
disks[i] = OfflineDisk disks[i] = OfflineDisk
} }
b.ResetTimer()
b.SetBytes(size) b.SetBytes(size)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
writers := make([]io.Writer, len(disks)) writers := make([]io.Writer, len(disks))
for i, disk := range disks { for i, disk := range disks {
if disk == OfflineDisk { if disk == OfflineDisk {


@ -102,7 +102,7 @@ func TestErasureHeal(t *testing.T) {
// setup stale disks for the test case // setup stale disks for the test case
staleDisks := make([]StorageAPI, len(disks)) staleDisks := make([]StorageAPI, len(disks))
copy(staleDisks, disks) copy(staleDisks, disks)
for j := 0; j < len(staleDisks); j++ { for j := range staleDisks {
if j < test.offDisks { if j < test.offDisks {
readers[j] = nil readers[j] = nil
} else { } else {


@ -175,7 +175,7 @@ func TestListOnlineDisks(t *testing.T) {
fourNanoSecs := time.Unix(4, 0).UTC() fourNanoSecs := time.Unix(4, 0).UTC()
modTimesThreeNone := make([]time.Time, 16) modTimesThreeNone := make([]time.Time, 16)
modTimesThreeFour := make([]time.Time, 16) modTimesThreeFour := make([]time.Time, 16)
for i := 0; i < 16; i++ { for i := range 16 {
// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one // Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
// to be tampered with. // to be tampered with.
if i > 12 { if i > 12 {
@ -244,7 +244,6 @@ func TestListOnlineDisks(t *testing.T) {
} }
for i, test := range testCases { for i, test := range testCases {
test := test
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
if err != nil { if err != nil {
@ -350,7 +349,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
fourNanoSecs := time.Unix(4, 0).UTC() fourNanoSecs := time.Unix(4, 0).UTC()
modTimesThreeNone := make([]time.Time, 16) modTimesThreeNone := make([]time.Time, 16)
modTimesThreeFour := make([]time.Time, 16) modTimesThreeFour := make([]time.Time, 16)
for i := 0; i < 16; i++ { for i := range 16 {
// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one // Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
// to be tampered with. // to be tampered with.
if i > 12 { if i > 12 {
@ -419,7 +418,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
} }
for i, test := range testCases { for i, test := range testCases {
test := test
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
_, err := obj.PutObject(ctx, bucket, object, _, err := obj.PutObject(ctx, bucket, object,
mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
@ -753,7 +751,7 @@ func TestCommonParities(t *testing.T) {
} }
for idx, test := range tests { for idx, test := range tests {
var metaArr []FileInfo var metaArr []FileInfo
for i := 0; i < 12; i++ { for i := range 12 {
fi := test.fi1 fi := test.fi1
if i%2 == 0 { if i%2 == 0 {
fi = test.fi2 fi = test.fi2


@ -116,7 +116,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error { func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
for index := range storageDisks { for index := range storageDisks {
index := index
g.Go(func() error { g.Go(func() error {
if storageDisks[index] == nil { if storageDisks[index] == nil {
// we ignore disk not found errors // we ignore disk not found errors


@ -296,7 +296,6 @@ func TestIsObjectDangling(t *testing.T) {
// Add new cases as seen // Add new cases as seen
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) { t.Run(testCase.name, func(t *testing.T) {
gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs) gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs)
if !gotMeta.Equals(testCase.expectedMeta) { if !gotMeta.Equals(testCase.expectedMeta) {


@ -204,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
// Read `xl.meta` in parallel across disks. // Read `xl.meta` in parallel across disks.
for index := range disks { for index := range disks {
index := index
g.Go(func() (err error) { g.Go(func() (err error) {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound


@ -55,7 +55,7 @@ func TestDiskCount(t *testing.T) {
// of errors into a single maximal error with in the list. // of errors into a single maximal error with in the list.
func TestReduceErrs(t *testing.T) { func TestReduceErrs(t *testing.T) {
canceledErrs := make([]error, 0, 5) canceledErrs := make([]error, 0, 5)
for i := 0; i < 5; i++ { for i := range 5 {
canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled)) canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled))
} }
// List all of all test cases to validate various cases of reduce errors. // List all of all test cases to validate various cases of reduce errors.
@ -222,7 +222,7 @@ func Test_hashOrder(t *testing.T) {
var tmp [16]byte var tmp [16]byte
rng.Read(tmp[:]) rng.Read(tmp[:])
prefix := hex.EncodeToString(tmp[:]) prefix := hex.EncodeToString(tmp[:])
for i := 0; i < 10000; i++ { for range 10000 {
rng.Read(tmp[:]) rng.Read(tmp[:])
y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x) y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x)


@ -408,7 +408,6 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc
// Start writing `xl.meta` to all disks in parallel. // Start writing `xl.meta` to all disks in parallel.
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
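For reference, outside this patch: the deleted index := index lines were needed before Go 1.22, when a for loop reused one variable across iterations; since Go 1.22 each iteration gets its own variable, so closures handed to goroutines or errgroup can capture it directly. A minimal sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for index := range 4 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(index) // Go 1.22+: per-iteration variable, no manual index := index copy
		}()
	}
	wg.Wait()
}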


@ -189,7 +189,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
commonNumVersions := 2 commonNumVersions := 2
numVersionsInQuorum := make([]int, 16) numVersionsInQuorum := make([]int, 16)
numVersionsNoQuorum := make([]int, 16) numVersionsNoQuorum := make([]int, 16)
for i := 0; i < 16; i++ { for i := range 16 {
if i < 4 { if i < 4 {
continue continue
} }
@ -269,7 +269,6 @@ func TestFindFileInfoInQuorum(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
test := test
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum) fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum)
_, ok1 := err.(InsufficientReadQuorum) _, ok1 := err.(InsufficientReadQuorum)
@ -316,7 +315,7 @@ func TestTransitionInfoEquals(t *testing.T) {
} }
var i uint var i uint
for i = 0; i < 8; i++ { for i = range uint(8) {
fi := FileInfo{ fi := FileInfo{
TransitionTier: inputs[0].tier, TransitionTier: inputs[0].tier,
TransitionedObjName: inputs[0].remoteObjName, TransitionedObjName: inputs[0].remoteObjName,


@ -322,7 +322,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
uploads = append(uploads, MultipartInfo{ uploads = append(uploads, MultipartInfo{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
UploadID: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))), UploadID: base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadID)),
Initiated: startTime, Initiated: startTime,
}) })
populatedUploadIDs.Add(uploadID) populatedUploadIDs.Add(uploadID)
@ -498,7 +498,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
partsMetadata[index].Metadata = userDefined partsMetadata[index].Metadata = userDefined
} }
uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano()) uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano())
uploadID := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadUUID))) uploadID := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadUUID))
uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID)
// Write updated `xl.meta` to all disks. // Write updated `xl.meta` to all disks.
@ -540,7 +540,6 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src
// Rename file on all underlying storage disks. // Rename file on all underlying storage disks.
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -820,7 +819,6 @@ func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI
objectParts := make([][]string, len(onlineDisks)) objectParts := make([][]string, len(onlineDisks))
// List uploaded parts from drives. // List uploaded parts from drives.
for index := range onlineDisks { for index := range onlineDisks {
index := index
g.Go(func() (err error) { g.Go(func() (err error) {
if onlineDisks[index] == nil { if onlineDisks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -995,7 +993,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
objectPartInfos := make([][]*ObjectPartInfo, len(disks)) objectPartInfos := make([][]*ObjectPartInfo, len(disks))
// Rename file on all underlying storage disks. // Rename file on all underlying storage disks.
for index := range disks { for index := range disks {
index := index
g.Go(func() (err error) { g.Go(func() (err error) {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
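For reference, outside this patch: []byte(fmt.Sprintf(...)) builds a string and then copies it, whereas fmt.Appendf (Go 1.19) formats straight into a byte slice, which is what the upload-ID construction above now uses. A minimal sketch with illustrative values:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	deploymentID, uploadUUID := "deploy-1", "uuid-2" // illustrative values only
	buf := fmt.Appendf(nil, "%s.%s", deploymentID, uploadUUID)
	fmt.Println(base64.RawURLEncoding.EncodeToString(buf))
}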


@ -24,6 +24,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"maps"
"net/http" "net/http"
"path" "path"
"runtime" "runtime"
@ -542,7 +543,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
disks := er.getDisks() disks := er.getDisks()
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -575,7 +575,6 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
rawFileInfos := make([]RawFileInfo, len(disks)) rawFileInfos := make([]RawFileInfo, len(disks))
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
for index := range disks { for index := range disks {
index := index
g.Go(func() (err error) { g.Go(func() (err error) {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -1029,7 +1028,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
dataDirs := make([]string, len(disks)) dataDirs := make([]string, len(disks))
// Rename file on all underlying storage disks. // Rename file on all underlying storage disks.
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -1631,7 +1629,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -1836,7 +1833,6 @@ func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object
} }
g := errgroup.WithNErrs(len(onlineDisks)) g := errgroup.WithNErrs(len(onlineDisks))
for index := range onlineDisks { for index := range onlineDisks {
index := index
g.Go(func() error { g.Go(func() error {
if onlineDisks[index] == nil { if onlineDisks[index] == nil {
return nil return nil
@ -1862,7 +1858,6 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
if disks[index] == nil { if disks[index] == nil {
return nil return nil
@ -2222,9 +2217,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
return ObjectInfo{}, err return ObjectInfo{}, err
} }
} }
-	for k, v := range objInfo.UserDefined {
-		fi.Metadata[k] = v
-	}
+	maps.Copy(fi.Metadata, objInfo.UserDefined)
fi.ModTime = opts.MTime fi.ModTime = opts.MTime
fi.VersionID = opts.VersionID fi.VersionID = opts.VersionID
@ -2294,9 +2287,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
fi.Metadata[xhttp.AmzObjectTagging] = tags fi.Metadata[xhttp.AmzObjectTagging] = tags
fi.ReplicationState = opts.PutReplicationState() fi.ReplicationState = opts.PutReplicationState()
for k, v := range opts.UserDefined { maps.Copy(fi.Metadata, opts.UserDefined)
fi.Metadata[k] = v
}
if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil { if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
@ -2314,7 +2305,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o
// Start writing `xl.meta` to all disks in parallel. // Start writing `xl.meta` to all disks in parallel.
for index := range onlineDisks { for index := range onlineDisks {
index := index
g.Go(func() error { g.Go(func() error {
if onlineDisks[index] == nil { if onlineDisks[index] == nil {
return errDiskNotFound return errDiskNotFound


@ -112,7 +112,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
t.Fatalf("Erasure Object upload failed: <ERROR> %s", err) t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
} }
for _, test := range testCases { for _, test := range testCases {
test := test
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{}) _, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{})
if err != nil { if err != nil {
@ -625,7 +624,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for f := 0; f < 2; f++ { for f := range 2 {
diskErrors := make(map[int]error) diskErrors := make(map[int]error)
for i := 0; i <= f; i++ { for i := 0; i <= f; i++ {
diskErrors[i] = nil diskErrors[i] = nil
@ -774,7 +773,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
// in a 16 disk Erasure setup. The original disks are 'replaced' with // in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method // naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,4) // invocations, where f - [0,4)
for f := 0; f < 2; f++ { for f := range 2 {
diskErrors := make(map[int]error) diskErrors := make(map[int]error)
for i := 0; i <= f; i++ { for i := 0; i <= f; i++ {
diskErrors[i] = nil diskErrors[i] = nil
@ -837,7 +836,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
// in a 16 disk Erasure setup. The original disks are 'replaced' with // in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method // naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,2) // invocations, where f - [0,2)
for f := 0; f < 2; f++ { for f := range 2 {
t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) { t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) {
diskErrors := make(map[int]error) diskErrors := make(map[int]error)
for i := 0; i <= f; i++ { for i := 0; i <= f; i++ {
@ -1109,7 +1108,6 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
{parts7, errs7, 11, 11, parts7SC, nil}, {parts7, errs7, 11, 11, parts7SC, nil},
} }
for _, tt := range tests { for _, tt := range tests {
tt := tt
t.(*testing.T).Run("", func(t *testing.T) { t.(*testing.T).Run("", func(t *testing.T) {
globalStorageClass.Update(tt.storageClassCfg) globalStorageClass.Update(tt.storageClassCfg)
actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks))) actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks)))


@ -25,6 +25,7 @@ import (
"io" "io"
"math/rand" "math/rand"
"net/http" "net/http"
"slices"
"sort" "sort"
"strings" "strings"
"time" "time"
@ -117,12 +118,7 @@ func (pd *PoolDecommissionInfo) bucketPop(bucket string) bool {
} }
func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool { func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool {
-	for _, b := range pd.DecommissionedBuckets {
-		if b == bucket {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(pd.DecommissionedBuckets, bucket)
} }
func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) { func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) {
@ -792,8 +788,6 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
} }
for setIdx, set := range pool.sets { for setIdx, set := range pool.sets {
set := set
filterLifecycle := func(bucket, object string, fi FileInfo) bool { filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil { if lc == nil {
return false return false
@ -901,7 +895,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
} }
// gr.Close() is ensured by decommissionObject(). // gr.Close() is ensured by decommissionObject().
for try := 0; try < 3; try++ { for range 3 {
if version.IsRemote() { if version.IsRemote() {
if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{ if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{
VersionID: versionID, VersionID: versionID,


@ -176,7 +176,6 @@ func TestPoolMetaValidate(t *testing.T) {
t.Parallel() t.Parallel()
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) { t.Run(testCase.name, func(t *testing.T) {
update, err := testCase.meta.validate(testCase.pools) update, err := testCase.meta.validate(testCase.pools)
if testCase.expectedErr { if testCase.expectedErr {


@ -580,8 +580,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
} }
for setIdx, set := range pool.sets { for setIdx, set := range pool.sets {
set := set
filterLifecycle := func(bucket, object string, fi FileInfo) bool { filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil { if lc == nil {
return false return false
@ -594,7 +592,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal) globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal)
return true return true
} }
return false return false
} }
@ -689,7 +686,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
continue continue
} }
for try := 0; try < 3; try++ { for range 3 {
// GetObjectReader.Close is called by rebalanceObject // GetObjectReader.Close is called by rebalanceObject
gr, err := set.GetObjectNInfo(ctx, gr, err := set.GetObjectNInfo(ctx,
bucket, bucket,


@ -420,7 +420,6 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b
nSets := make([]int, len(z.serverPools)) nSets := make([]int, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools { for index := range z.serverPools {
index := index
// Skip suspended pools or pools participating in rebalance for any new // Skip suspended pools or pools participating in rebalance for any new
// I/O. // I/O.
if z.IsSuspended(index) || z.IsPoolRebalancing(index) { if z.IsSuspended(index) || z.IsPoolRebalancing(index) {
@ -660,7 +659,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
g := errgroup.WithNErrs(len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools { for index := range z.serverPools {
index := index
g.Go(func() error { g.Go(func() error {
return z.serverPools[index].Shutdown(ctx) return z.serverPools[index].Shutdown(ctx)
}, index) }, index)
@ -712,7 +710,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool)
storageInfos := make([]StorageInfo, len(z.serverPools)) storageInfos := make([]StorageInfo, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools { for index := range z.serverPools {
index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics) storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics)
return nil return nil
@ -1268,7 +1265,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools)) eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools))
for i, pool := range z.serverPools { for i, pool := range z.serverPools {
i := i
pool := pool pool := pool
eg.Go(func() error { eg.Go(func() error {
dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts) dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts)
@ -2244,7 +2240,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
for poolIdx, erasureSet := range z.serverPools { for poolIdx, erasureSet := range z.serverPools {
for setIdx, set := range erasureSet.sets { for setIdx, set := range erasureSet.sets {
set := set
listOut := make(chan metaCacheEntry, 1) listOut := make(chan metaCacheEntry, 1)
entries = append(entries, listOut) entries = append(entries, listOut)
disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true) disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)


@ -392,7 +392,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
var lk sync.Mutex var lk sync.Mutex
for i := range setCount { for i := range setCount {
lockerEpSet := set.NewStringSet() lockerEpSet := set.NewStringSet()
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
wg.Add(1) wg.Add(1)
go func(i int, endpoint Endpoint) { go func(i int, endpoint Endpoint) {
defer wg.Done() defer wg.Done()
@ -415,7 +415,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
defer wg.Done() defer wg.Done()
var innerWg sync.WaitGroup var innerWg sync.WaitGroup
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
disk := storageDisks[i*setDriveCount+j] disk := storageDisks[i*setDriveCount+j]
if disk == nil { if disk == nil {
continue continue
@ -593,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo {
g := errgroup.WithNErrs(len(s.sets)) g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets { for index := range s.sets {
index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index] = s.sets[index].StorageInfo(ctx) storageInfos[index] = s.sets[index].StorageInfo(ctx)
return nil return nil
@ -618,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag
g := errgroup.WithNErrs(len(s.sets)) g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets { for index := range s.sets {
index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics) storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics)
return nil return nil
@ -641,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
g := errgroup.WithNErrs(len(s.sets)) g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets { for index := range s.sets {
index := index
g.Go(func() error { g.Go(func() error {
return s.sets[index].Shutdown(ctx) return s.sets[index].Shutdown(ctx)
}, index) }, index)
@ -705,7 +702,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error { func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
for index := range storageDisks { for index := range storageDisks {
index := index
g.Go(func() error { g.Go(func() error {
if storageDisks[index] == nil { if storageDisks[index] == nil {
// we ignore disk not found errors // we ignore disk not found errors


@ -40,13 +40,12 @@ func BenchmarkCrcHash(b *testing.B) {
{1024}, {1024},
} }
for _, testCase := range cases { for _, testCase := range cases {
testCase := testCase
key := randString(testCase.key) key := randString(testCase.key)
b.Run("", func(b *testing.B) { b.Run("", func(b *testing.B) {
b.SetBytes(1024) b.SetBytes(1024)
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for b.Loop() {
crcHashMod(key, 16) crcHashMod(key, 16)
} }
}) })
@ -65,13 +64,12 @@ func BenchmarkSipHash(b *testing.B) {
{1024}, {1024},
} }
for _, testCase := range cases { for _, testCase := range cases {
testCase := testCase
key := randString(testCase.key) key := randString(testCase.key)
b.Run("", func(b *testing.B) { b.Run("", func(b *testing.B) {
b.SetBytes(1024) b.SetBytes(1024)
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for b.Loop() {
sipHashMod(key, 16, testUUID) sipHashMod(key, 16, testUUID)
} }
}) })
@ -164,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
nDisks := 16 // Maximum disks. nDisks := 16 // Maximum disks.
var erasureDisks []string var erasureDisks []string
for i := 0; i < nDisks; i++ { for range nDisks {
// Do not attempt to create this path, the test validates // Do not attempt to create this path, the test validates
// so that newErasureSets initializes non existing paths // so that newErasureSets initializes non existing paths
// and successfully returns initialized object layer. // and successfully returns initialized object layer.


@ -21,6 +21,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"maps"
"math/rand" "math/rand"
"os" "os"
"runtime" "runtime"
@ -175,7 +176,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
for index := range disks { for index := range disks {
index := index
g.Go(func() error { g.Go(func() error {
di := madmin.Disk{ di := madmin.Disk{
Endpoint: endpoints[index].String(), Endpoint: endpoints[index].String(),
@ -219,9 +219,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
di.Metrics.LastMinute[k] = v.asTimedAction() di.Metrics.LastMinute[k] = v.asTimedAction()
} }
} }
-			for k, v := range info.Metrics.APICalls {
-				di.Metrics.APICalls[k] = v
-			}
+			maps.Copy(di.Metrics.APICalls, info.Metrics.APICalls)
if info.Total > 0 { if info.Total > 0 {
di.Utilization = float64(info.Used / info.Total * 100) di.Utilization = float64(info.Used / info.Total * 100)
} }
@ -287,7 +285,6 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new
infos := make([]DiskInfo, len(disks)) infos := make([]DiskInfo, len(disks))
r := rand.New(rand.NewSource(time.Now().UnixNano())) r := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, i := range r.Perm(len(disks)) { for _, i := range r.Perm(len(disks)) {
i := i
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()


@ -99,7 +99,7 @@ func fmtGenMain(ctxt *cli.Context) {
format := newFormatErasureV3(setCount, setDriveCount) format := newFormatErasureV3(setCount, setDriveCount)
format.ID = deploymentID format.ID = deploymentID
for i := range setCount { // for each erasure set for i := range setCount { // for each erasure set
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
if deploymentID != "" { if deploymentID != "" {


@ -159,7 +159,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
for i := range numSets { for i := range numSets {
format.Erasure.Sets[i] = make([]string, setLen) format.Erasure.Sets[i] = make([]string, setLen)
for j := 0; j < setLen; j++ { for j := range setLen {
format.Erasure.Sets[i][j] = mustGetUUID() format.Erasure.Sets[i][j] = mustGetUUID()
} }
} }
@ -324,7 +324,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
// Load format from each disk in parallel // Load format from each disk in parallel
for index := range storageDisks { for index := range storageDisks {
index := index
g.Go(func() error { g.Go(func() error {
if storageDisks[index] == nil { if storageDisks[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -530,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format
// Write `format.json` to all disks. // Write `format.json` to all disks.
for index := range storageDisks { for index := range storageDisks {
index := index
g.Go(func() error { g.Go(func() error {
if formats[index] == nil { if formats[index] == nil {
return errDiskNotFound return errDiskNotFound
@ -566,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag
storageDisks := make([]StorageAPI, len(endpoints)) storageDisks := make([]StorageAPI, len(endpoints))
g := errgroup.WithNErrs(len(endpoints)) g := errgroup.WithNErrs(len(endpoints))
for index := range endpoints { for index := range endpoints {
index := index
g.Go(func() (err error) { g.Go(func() (err error) {
storageDisks[index], err = newStorageAPI(endpoints[index], opts) storageDisks[index], err = newStorageAPI(endpoints[index], opts)
return err return err
@ -600,7 +597,6 @@ func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool {
func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error { func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error {
g := errgroup.WithNErrs(len(formats)) g := errgroup.WithNErrs(len(formats))
for i := range formats { for i := range formats {
i := i
g.Go(func() error { g.Go(func() error {
if formats[i] == nil || !endpoints[i].IsLocal { if formats[i] == nil || !endpoints[i].IsLocal {
return nil return nil
@ -641,7 +637,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
for i := range setCount { for i := range setCount {
hostCount := make(map[string]int, setDriveCount) hostCount := make(map[string]int, setDriveCount)
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
disk := storageDisks[i*setDriveCount+j] disk := storageDisks[i*setDriveCount+j]
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
@ -662,7 +658,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
return return
} }
logger.Info(" * Set %v:", i+1) logger.Info(" * Set %v:", i+1)
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
disk := storageDisks[i*setDriveCount+j] disk := storageDisks[i*setDriveCount+j]
logger.Info(" - Drive: %s", disk.String()) logger.Info(" - Drive: %s", disk.String())
} }


@ -48,7 +48,7 @@ func TestFixFormatV3(t *testing.T) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 8) formats := make([]*formatErasureV3, 8)
for j := 0; j < 8; j++ { for j := range 8 {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[0][j] newFormat.Erasure.This = format.Erasure.Sets[0][j]
formats[j] = newFormat formats[j] = newFormat
@ -79,7 +79,7 @@ func TestFormatErasureEmpty(t *testing.T) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 16) formats := make([]*formatErasureV3, 16)
for j := 0; j < 16; j++ { for j := range 16 {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[0][j] newFormat.Erasure.This = format.Erasure.Sets[0][j]
formats[j] = newFormat formats[j] = newFormat
@ -276,8 +276,8 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 32) formats := make([]*formatErasureV3, 32)
for i := 0; i < setCount; i++ { for i := range setCount {
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat formats[i*setDriveCount+j] = newFormat
@ -390,18 +390,17 @@ func BenchmarkGetFormatErasureInQuorumOld(b *testing.B) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 15*200) formats := make([]*formatErasureV3, 15*200)
for i := 0; i < setCount; i++ { for i := range setCount {
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat formats[i*setDriveCount+j] = newFormat
} }
} }
b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_, _ = getFormatErasureInQuorumOld(formats) _, _ = getFormatErasureInQuorumOld(formats)
} }
} }
@ -414,18 +413,17 @@ func BenchmarkGetFormatErasureInQuorum(b *testing.B) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 15*200) formats := make([]*formatErasureV3, 15*200)
for i := 0; i < setCount; i++ { for i := range setCount {
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat formats[i*setDriveCount+j] = newFormat
} }
} }
b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_, _ = getFormatErasureInQuorum(formats) _, _ = getFormatErasureInQuorum(formats)
} }
} }
@ -440,8 +438,8 @@ func TestNewFormatSets(t *testing.T) {
formats := make([]*formatErasureV3, 32) formats := make([]*formatErasureV3, 32)
errs := make([]error, 32) errs := make([]error, 32)
for i := 0; i < setCount; i++ { for i := range setCount {
for j := 0; j < setDriveCount; j++ { for j := range setDriveCount {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat formats[i*setDriveCount+j] = newFormat


@ -98,7 +98,7 @@ func (m *minioFileInfo) IsDir() bool {
return m.isDir return m.isDir
} }
func (m *minioFileInfo) Sys() interface{} { func (m *minioFileInfo) Sys() any {
return nil return nil
} }
@ -316,7 +316,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
if err != nil { if err != nil {
return nil, err return nil, err
} }
claims := make(map[string]interface{}) claims := make(map[string]any)
claims[expClaim] = UTCNow().Add(expiryDur).Unix() claims[expClaim] = UTCNow().Add(expiryDur).Unix()
claims[ldapUser] = lookupResult.NormDN claims[ldapUser] = lookupResult.NormDN


@ -33,14 +33,14 @@ var globalRemoteFTPClientTransport = NewRemoteTargetHTTPTransport(true)()
type minioLogger struct{} type minioLogger struct{}
// Print implement Logger // Print implement Logger
func (log *minioLogger) Print(sessionID string, message interface{}) { func (log *minioLogger) Print(sessionID string, message any) {
if serverDebugLog { if serverDebugLog {
fmt.Printf("%s %s\n", sessionID, message) fmt.Printf("%s %s\n", sessionID, message)
} }
} }
// Printf implement Logger // Printf implement Logger
func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) { func (log *minioLogger) Printf(sessionID string, format string, v ...any) {
if serverDebugLog { if serverDebugLog {
if sessionID != "" { if sessionID != "" {
fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...)) fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...))


@ -23,6 +23,7 @@ import (
"net/http" "net/http"
"path" "path"
"runtime/debug" "runtime/debug"
"slices"
"strings" "strings"
"sync/atomic" "sync/atomic"
"time" "time"
@ -396,18 +397,16 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler {
if k == "delimiter" { // delimiters are allowed to have `.` or `..` if k == "delimiter" { // delimiters are allowed to have `.` or `..`
continue continue
} }
-			for _, v := range vv {
-				if hasBadPathComponent(v) {
-					if ok {
-						tc.FuncName = "handler.ValidRequest"
-						tc.ResponseRecorder.LogErrBody = true
-					}
-					defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
-					writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
-					atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
-					return
-				}
-			}
+			if slices.ContainsFunc(vv, hasBadPathComponent) {
+				if ok {
+					tc.FuncName = "handler.ValidRequest"
+					tc.ResponseRecorder.LogErrBody = true
+				}
+				defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
+				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
+				atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
+				return
+			}
} }
if hasMultipleAuth(r) { if hasMultipleAuth(r) {


@ -90,7 +90,7 @@ var isHTTPHeaderSizeTooLargeTests = []struct {
func generateHeader(size, usersize int) http.Header { func generateHeader(size, usersize int) http.Header {
header := http.Header{} header := http.Header{}
for i := 0; i < size; i++ { for i := range size {
header.Set(strconv.Itoa(i), "") header.Set(strconv.Itoa(i), "")
} }
userlength := 0 userlength := 0
@ -136,7 +136,6 @@ var containsReservedMetadataTests = []struct {
func TestContainsReservedMetadata(t *testing.T) { func TestContainsReservedMetadata(t *testing.T) {
for _, test := range containsReservedMetadataTests { for _, test := range containsReservedMetadataTests {
test := test
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
contains := containsReservedMetadata(test.header) contains := containsReservedMetadata(test.header)
if contains && !test.shouldFail { if contains && !test.shouldFail {
@ -201,7 +200,7 @@ func Benchmark_hasBadPathComponent(t *testing.B) {
t.Run(tt.name, func(b *testing.B) { t.Run(tt.name, func(b *testing.B) {
b.SetBytes(int64(len(tt.input))) b.SetBytes(int64(len(tt.input)))
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
if got := hasBadPathComponent(tt.input); got != tt.want { if got := hasBadPathComponent(tt.input); got != tt.want {
t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want) t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want)
} }


@ -292,7 +292,7 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
return contentEnc return contentEnc
} }
var newEncs []string var newEncs []string
for _, enc := range strings.Split(contentEnc, ",") { for enc := range strings.SplitSeq(contentEnc, ",") {
if enc != streamingContentEncoding { if enc != streamingContentEncoding {
newEncs = append(newEncs, enc) newEncs = append(newEncs, enc)
} }
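For reference, outside this patch: strings.SplitSeq (Go 1.24) yields the split pieces as an iterator instead of allocating the whole slice, so ranging over it replaces range strings.Split when the pieces are only iterated. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	contentEnc := "aws-chunked,gzip"
	for enc := range strings.SplitSeq(contentEnc, ",") { // iterator form, no intermediate []string
		fmt.Println(enc)
	}
}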


@ -54,10 +54,7 @@ func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err er
case h.IsSuffixLength: case h.IsSuffixLength:
specifiedLen := -h.Start specifiedLen := -h.Start
-		rangeLength = specifiedLen
-		if specifiedLen > resourceSize {
-			rangeLength = resourceSize
-		}
+		rangeLength = min(specifiedLen, resourceSize)
case h.Start >= resourceSize: case h.Start >= resourceSize:
return 0, InvalidRange{ return 0, InvalidRange{
@ -98,10 +95,7 @@ func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64
start = h.Start start = h.Start
if h.IsSuffixLength { if h.IsSuffixLength {
-		start = resourceSize + h.Start
-		if start < 0 {
-			start = 0
-		}
+		start = max(resourceSize+h.Start, 0)
} }
return start, length, nil return start, length, nil
} }


@ -98,7 +98,7 @@ func (ies *IAMEtcdStore) getUsersSysType() UsersSysType {
return ies.usersSysType return ies.usersSysType
} }
func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error { func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item any, itemPath string, opts ...options) error {
data, err := json.Marshal(item) data, err := json.Marshal(item)
if err != nil { if err != nil {
return err return err
@ -114,7 +114,7 @@ func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, it
return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...) return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...)
} }
func getIAMConfig(item interface{}, data []byte, itemPath string) error { func getIAMConfig(item any, data []byte, itemPath string) error {
data, err := decryptData(data, itemPath) data, err := decryptData(data, itemPath)
if err != nil { if err != nil {
return err return err
@ -123,7 +123,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error {
return json.Unmarshal(data, item) return json.Unmarshal(data, item)
} }
func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error { func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item any, path string) error {
data, err := readKeyEtcd(ctx, ies.client, path) data, err := readKeyEtcd(ctx, ies.client, path)
if err != nil { if err != nil {
return err return err


@ -22,6 +22,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"maps"
"path" "path"
"strings" "strings"
"sync" "sync"
@ -80,7 +81,7 @@ func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType {
return iamOS.usersSysType return iamOS.usersSysType
} }
func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error { func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item any, objPath string, opts ...options) error {
json := jsoniter.ConfigCompatibleWithStandardLibrary json := jsoniter.ConfigCompatibleWithStandardLibrary
data, err := json.Marshal(item) data, err := json.Marshal(item)
if err != nil { if err != nil {
@ -135,7 +136,7 @@ func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context,
return data, meta, nil return data, meta, nil
} }
func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error { func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item any, objPath string) error {
data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath) data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath)
if err != nil { if err != nil {
return err return err
@ -294,7 +295,6 @@ func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IA
g := errgroup.WithNErrs(len(users)) g := errgroup.WithNErrs(len(users))
for index := range users { for index := range users {
index := index
g.Go(func() error { g.Go(func() error {
userName := path.Dir(users[index]) userName := path.Dir(users[index])
user, err := iamOS.loadUserIdentity(ctx, userName, userType) user, err := iamOS.loadUserIdentity(ctx, userName, userType)
@ -413,7 +413,6 @@ func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, use
g := errgroup.WithNErrs(len(users)) g := errgroup.WithNErrs(len(users))
for index := range users { for index := range users {
index := index
g.Go(func() error { g.Go(func() error {
userName := strings.TrimSuffix(users[index], ".json") userName := strings.TrimSuffix(users[index], ".json")
userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup) userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup)
@ -538,7 +537,6 @@ func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, polici
g := errgroup.WithNErrs(len(policies)) g := errgroup.WithNErrs(len(policies))
for index := range policies { for index := range policies {
index := index
g.Go(func() error { g.Go(func() error {
policyName := path.Dir(policies[index]) policyName := path.Dir(policies[index])
policyDoc, err := iamOS.loadPolicy(ctx, policyName) policyDoc, err := iamOS.loadPolicy(ctx, policyName)
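The deleted `index := index` lines (and, further down, `client := client` and `testCase := testCase`) were pre-Go-1.22 idiom for giving each closure its own copy of the loop variable. Since Go 1.22 every iteration of a `for` loop declares a fresh variable, so these copies are redundant. A small sketch, using a hypothetical `users` slice rather than the IAM types in this file, of why closures can now capture the loop variable directly:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	users := []string{"alice", "bob", "carol"}
	results := make([]string, len(users))

	var wg sync.WaitGroup
	for index := range users {
		// Go 1.22+: index is a new variable each iteration, so the
		// closure below captures the value for *this* iteration.
		// Before 1.22 an explicit copy was needed here:
		//   index := index
		wg.Add(1)
		go func() {
			defer wg.Done()
			results[index] = "loaded " + users[index]
		}()
	}
	wg.Wait()
	fmt.Println(results) // [loaded alice loaded bob loaded carol]
}
```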
@ -776,9 +774,7 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
} }
// Copy svcUsersMap to cache.iamUsersMap // Copy svcUsersMap to cache.iamUsersMap
for k, v := range svcUsersMap { maps.Copy(cache.iamUsersMap, svcUsersMap)
cache.iamUsersMap[k] = v
}
cache.buildUserGroupMemberships() cache.buildUserGroupMemberships()
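`maps.Copy(dst, src)` from the standard `maps` package (Go 1.21) copies every key/value pair of `src` into `dst`, overwriting keys that already exist, which is exactly what the removed `for k, v := range src { dst[k] = v }` loops did. A minimal sketch with illustrative maps, not the actual IAM cache types:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	cache := map[string]int{"alice": 1}
	loaded := map[string]int{"alice": 2, "bob": 3}

	// Equivalent to: for k, v := range loaded { cache[k] = v }
	// Existing keys in cache are overwritten; the destination must be non-nil.
	maps.Copy(cache, loaded)

	fmt.Println(cache) // map[alice:2 bob:3]
}
```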

View File

@ -23,6 +23,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"maps"
"path" "path"
"sort" "sort"
"strings" "strings"
@ -159,7 +160,7 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string
type UserIdentity struct { type UserIdentity struct {
Version int `json:"version"` Version int `json:"version"`
Credentials auth.Credentials `json:"credentials"` Credentials auth.Credentials `json:"credentials"`
UpdatedAt time.Time `json:"updatedAt,omitempty"` UpdatedAt time.Time `json:"updatedAt"`
} }
func newUserIdentity(cred auth.Credentials) UserIdentity { func newUserIdentity(cred auth.Credentials) UserIdentity {
@ -171,7 +172,7 @@ type GroupInfo struct {
Version int `json:"version"` Version int `json:"version"`
Status string `json:"status"` Status string `json:"status"`
Members []string `json:"members"` Members []string `json:"members"`
UpdatedAt time.Time `json:"updatedAt,omitempty"` UpdatedAt time.Time `json:"updatedAt"`
} }
func newGroupInfo(members []string) GroupInfo { func newGroupInfo(members []string) GroupInfo {
@ -182,7 +183,7 @@ func newGroupInfo(members []string) GroupInfo {
type MappedPolicy struct { type MappedPolicy struct {
Version int `json:"version"` Version int `json:"version"`
Policies string `json:"policy"` Policies string `json:"policy"`
UpdatedAt time.Time `json:"updatedAt,omitempty"` UpdatedAt time.Time `json:"updatedAt"`
} }
// mappedPoliciesToMap copies the map of mapped policies to a regular map. // mappedPoliciesToMap copies the map of mapped policies to a regular map.
@ -198,7 +199,7 @@ func mappedPoliciesToMap(m *xsync.MapOf[string, MappedPolicy]) map[string]Mapped
// converts a mapped policy into a slice of distinct policies // converts a mapped policy into a slice of distinct policies
func (mp MappedPolicy) toSlice() []string { func (mp MappedPolicy) toSlice() []string {
var policies []string var policies []string
for _, policy := range strings.Split(mp.Policies, ",") { for policy := range strings.SplitSeq(mp.Policies, ",") {
if strings.TrimSpace(policy) == "" { if strings.TrimSpace(policy) == "" {
continue continue
} }
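`strings.SplitSeq` (Go 1.24) returns an iterator over the substrings instead of allocating a `[]string`, so `for _, s := range strings.Split(...)` becomes `for s := range strings.SplitSeq(...)` with the same visit order but without the intermediate slice. A short sketch using an illustrative policy string:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	policies := "readwrite, ,diagnostics"

	// SplitSeq yields each substring lazily; iteration behaviour matches
	// Split, but no []string is built up front.
	for p := range strings.SplitSeq(policies, ",") {
		if strings.TrimSpace(p) == "" {
			continue
		}
		fmt.Println(strings.TrimSpace(p))
	}
	// Output:
	// readwrite
	// diagnostics
}
```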
@ -219,8 +220,8 @@ func newMappedPolicy(policy string) MappedPolicy {
type PolicyDoc struct { type PolicyDoc struct {
Version int `json:",omitempty"` Version int `json:",omitempty"`
Policy policy.Policy Policy policy.Policy
CreateDate time.Time `json:",omitempty"` CreateDate time.Time
UpdateDate time.Time `json:",omitempty"` UpdateDate time.Time
} }
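Dropping `,omitempty` on these `time.Time` fields should not change the encoded output: with `encoding/json` (and jsoniter's compatible config) `omitempty` only omits false, zero numbers, nil pointers and interfaces, and empty arrays, slices, maps, and strings, so a struct value such as the zero `time.Time` was never omitted anyway and the option was inert. A small sketch, assuming the standard library encoder and illustrative struct names:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type withTag struct {
	UpdatedAt time.Time `json:"updatedAt,omitempty"`
}

type withoutTag struct {
	UpdatedAt time.Time `json:"updatedAt"`
}

func main() {
	a, _ := json.Marshal(withTag{})    // omitempty has no effect on struct-typed fields,
	b, _ := json.Marshal(withoutTag{}) // so both encode the zero time identically.
	fmt.Println(string(a)) // {"updatedAt":"0001-01-01T00:00:00Z"}
	fmt.Println(string(b)) // {"updatedAt":"0001-01-01T00:00:00Z"}
}
```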
func newPolicyDoc(p policy.Policy) PolicyDoc { func newPolicyDoc(p policy.Policy) PolicyDoc {
@ -400,7 +401,6 @@ func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool,
g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time. g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time.
for index := range groups { for index := range groups {
index := index
g.Go(func() error { g.Go(func() error {
err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap) err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap)
if err != nil && !errors.Is(err, errNoSuchPolicy) { if err != nil && !errors.Is(err, errNoSuchPolicy) {
@ -610,8 +610,8 @@ type IAMStorageAPI interface {
loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error
loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error saveIAMConfig(ctx context.Context, item any, path string, opts ...options) error
loadIAMConfig(ctx context.Context, item interface{}, path string) error loadIAMConfig(ctx context.Context, item any, path string) error
deleteIAMConfig(ctx context.Context, path string) error deleteIAMConfig(ctx context.Context, path string) error
savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error
@ -839,7 +839,7 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string,
return policies, nil return policies, nil
} }
if store.policy != nil { if store.policy != nil {
val, err, _ := store.policy.Do(name, func() (interface{}, error) { val, err, _ := store.policy.Do(name, func() (any, error) {
return getPolicies() return getPolicies()
}) })
if err != nil { if err != nil {
@ -1614,9 +1614,7 @@ func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Polic
} }
cache := store.lock() cache := store.lock()
for policy, p := range m { maps.Copy(cache.iamPolicyDocsMap, m)
cache.iamPolicyDocsMap[policy] = p
}
store.unlock() store.unlock()
for policy, p := range m { for policy, p := range m {
@ -2909,7 +2907,7 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred
func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error { func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error {
groupLoad := env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn groupLoad := env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn
newCachePopulate := func() (val interface{}, err error) { newCachePopulate := func() (val any, err error) {
newCache := newIamCache() newCache := newIamCache()
// Check for service account first // Check for service account first
@ -2975,7 +2973,7 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
} }
var ( var (
val interface{} val any
err error err error
) )
if store.group != nil { if store.group != nil {
@ -3007,30 +3005,20 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
return true return true
}) })
for k, v := range newCache.iamGroupsMap { maps.Copy(cache.iamGroupsMap, newCache.iamGroupsMap)
cache.iamGroupsMap[k] = v
}
for k, v := range newCache.iamPolicyDocsMap { maps.Copy(cache.iamPolicyDocsMap, newCache.iamPolicyDocsMap)
cache.iamPolicyDocsMap[k] = v
}
for k, v := range newCache.iamUserGroupMemberships { maps.Copy(cache.iamUserGroupMemberships, newCache.iamUserGroupMemberships)
cache.iamUserGroupMemberships[k] = v
}
newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool { newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool {
cache.iamUserPolicyMap.Store(k, v) cache.iamUserPolicyMap.Store(k, v)
return true return true
}) })
for k, v := range newCache.iamUsersMap { maps.Copy(cache.iamUsersMap, newCache.iamUsersMap)
cache.iamUsersMap[k] = v
}
for k, v := range newCache.iamSTSAccountsMap { maps.Copy(cache.iamSTSAccountsMap, newCache.iamSTSAccountsMap)
cache.iamSTSAccountsMap[k] = v
}
newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool { newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool {
cache.iamSTSPolicyMap.Store(k, v) cache.iamSTSPolicyMap.Store(k, v)

View File

@ -1056,7 +1056,7 @@ type newServiceAccountOpts struct {
expiration *time.Time expiration *time.Time
allowSiteReplicatorAccount bool // allow creating internal service account for site-replication. allowSiteReplicatorAccount bool // allow creating internal service account for site-replication.
claims map[string]interface{} claims map[string]any
} }
// NewServiceAccount - create a new service account // NewServiceAccount - create a new service account
@ -1099,7 +1099,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount { if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount {
return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed
} }
m := make(map[string]interface{}) m := make(map[string]any)
m[parentClaim] = parentUser m[parentClaim] = parentUser
if len(policyBuf) > 0 { if len(policyBuf) > 0 {
@ -1345,7 +1345,7 @@ func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) (
} }
// GetClaimsForSvcAcc - gets the claims associated with the service account. // GetClaimsForSvcAcc - gets the claims associated with the service account.
func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) { func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]any, error) {
if !sys.Initialized() { if !sys.Initialized() {
return nil, errServerNotInitialized return nil, errServerNotInitialized
} }
@ -1696,10 +1696,8 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...)) return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...))
} }
for k, v := range updatedKeysMap { // Replace the map values with the updated ones
// Replace the map values with the updated ones maps.Copy(accessKeyMap, updatedKeysMap)
accessKeyMap[k] = v
}
return skippedAccessKeys, nil return skippedAccessKeys, nil
} }

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"errors" "errors"
"maps"
"net/http" "net/http"
"time" "time"
@ -110,9 +111,7 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b
return nil, nil, false, errAuthentication return nil, nil, false, errAuthentication
} }
for k, v := range eclaims { maps.Copy(claims.MapClaims, eclaims)
claims.MapClaims[k] = v
}
// if root access is disabled, disable all its service accounts and temporary credentials. // if root access is disabled, disable all its service accounts and temporary credentials.
if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() { if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() {

View File

@ -175,7 +175,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
fn := authenticateNode fn := authenticateNode
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
fn(creds.AccessKey, creds.SecretKey) fn(creds.AccessKey, creds.SecretKey)
} }
}) })
@ -183,7 +183,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
fn := newCachedAuthToken() fn := newCachedAuthToken()
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
fn() fn()
} }
}) })
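`testing.B.Loop` (Go 1.24) replaces the classic `for i := 0; i < b.N; i++` benchmark loop, and because `Loop` resets the benchmark timer on its first call, the explicit `b.ResetTimer()` calls removed later in this diff become redundant. Where the counter is still needed, the hybrid form `for i := 0; b.Loop(); i++` is used. A sketch with a hypothetical `join` helper, not a benchmark from this repository:

```go
package bench_test

import (
	"strings"
	"testing"
)

// join is illustrative only.
func join(parts ...string) string { return strings.Join(parts, "/") }

func BenchmarkJoin(b *testing.B) {
	b.ReportAllocs()
	// b.Loop() replaces `for i := 0; i < b.N; i++`; it resets the timer the
	// first time it is called, so a preceding b.ResetTimer() is unnecessary.
	for b.Loop() {
		join("volume", "path", "object")
	}
}

func BenchmarkJoinIndexed(b *testing.B) {
	// When a counter is still needed, keep it alongside the Loop call.
	for i := 0; b.Loop(); i++ {
		join("volume", "path", string(rune('a'+i%26)))
	}
}
```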

View File

@ -139,7 +139,7 @@ func pickRelevantGoroutines() (gs []string) {
// get runtime stack buffer. // get runtime stack buffer.
buf := debug.Stack() buf := debug.Stack()
// runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" . // runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" .
for _, g := range strings.Split(string(buf), "\n\n") { for g := range strings.SplitSeq(string(buf), "\n\n") {
// Again split on a new line, the first line of the second half contains the info about the go routine. // Again split on a new line, the first line of the second half contains the info about the go routine.
sl := strings.SplitN(g, "\n", 2) sl := strings.SplitN(g, "\n", 2)
if len(sl) != 2 { if len(sl) != 2 {

View File

@ -329,7 +329,7 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
lris, ok := l.lockMap[resource] lris, ok := l.lockMap[resource]
if !ok { if !ok {
// Just to be safe, delete uuids. // Just to be safe, delete uuids.
for idx := 0; idx < maxDeleteList; idx++ { for idx := range maxDeleteList {
mapID := formatUUID(uid, idx) mapID := formatUUID(uid, idx)
if _, ok := l.lockUID[mapID]; !ok { if _, ok := l.lockUID[mapID]; !ok {
break break
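Go 1.22 allows ranging over an integer: `for i := range n` iterates `i = 0 … n-1`, and plain `for range n` runs the body `n` times when the counter is unused, replacing the `for i := 0; i < n; i++` form seen on the left-hand side of these hunks. A minimal sketch with an illustrative constant:

```go
package main

import "fmt"

func main() {
	const maxDeleteList = 3

	// for i := range n iterates i = 0, 1, ..., n-1 (Go 1.22+).
	for idx := range maxDeleteList {
		fmt.Println("checking uuid slot", idx)
	}

	// When the counter itself is unused, drop it entirely.
	attempts := 0
	for range maxDeleteList {
		attempts++
	}
	fmt.Println("attempts:", attempts) // attempts: 3
}
```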

View File

@ -279,12 +279,12 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) {
} }
t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) { t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
l := newLocker() l := newLocker()
for i := 0; i < locks; i++ { for range locks {
var tmp [16]byte var tmp [16]byte
rng.Read(tmp[:]) rng.Read(tmp[:])
res := []string{hex.EncodeToString(tmp[:])} res := []string{hex.EncodeToString(tmp[:])}
for i := 0; i < readers; i++ { for range readers {
rng.Read(tmp[:]) rng.Read(tmp[:])
ok, err := l.RLock(t.Context(), dsync.LockArgs{ ok, err := l.RLock(t.Context(), dsync.LockArgs{
UID: uuid.NewString(), UID: uuid.NewString(),
@ -366,12 +366,12 @@ func Test_localLocker_RUnlock(t *testing.T) {
} }
t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) { t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
l := newLocker() l := newLocker()
for i := 0; i < locks; i++ { for range locks {
var tmp [16]byte var tmp [16]byte
rng.Read(tmp[:]) rng.Read(tmp[:])
res := []string{hex.EncodeToString(tmp[:])} res := []string{hex.EncodeToString(tmp[:])}
for i := 0; i < readers; i++ { for range readers {
rng.Read(tmp[:]) rng.Read(tmp[:])
ok, err := l.RLock(t.Context(), dsync.LockArgs{ ok, err := l.RLock(t.Context(), dsync.LockArgs{
UID: uuid.NewString(), UID: uuid.NewString(),

View File

@ -8,211 +8,211 @@ import (
"github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger"
) )
func proxyLogIf(ctx context.Context, err error, errKind ...interface{}) { func proxyLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "proxy", err, errKind...) logger.LogIf(ctx, "proxy", err, errKind...)
} }
func replLogIf(ctx context.Context, err error, errKind ...interface{}) { func replLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "replication", err, errKind...) logger.LogIf(ctx, "replication", err, errKind...)
} }
func replLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func replLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "replication", err, id, errKind...) logger.LogOnceIf(ctx, "replication", err, id, errKind...)
} }
func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "iam", err, id, errKind...) logger.LogOnceIf(ctx, "iam", err, id, errKind...)
} }
func iamLogIf(ctx context.Context, err error, errKind ...interface{}) { func iamLogIf(ctx context.Context, err error, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) { if !errors.Is(err, grid.ErrDisconnected) {
logger.LogIf(ctx, "iam", err, errKind...) logger.LogIf(ctx, "iam", err, errKind...)
} }
} }
func iamLogEvent(ctx context.Context, msg string, args ...interface{}) { func iamLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "iam", msg, args...) logger.Event(ctx, "iam", msg, args...)
} }
func rebalanceLogIf(ctx context.Context, err error, errKind ...interface{}) { func rebalanceLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "rebalance", err, errKind...) logger.LogIf(ctx, "rebalance", err, errKind...)
} }
func rebalanceLogEvent(ctx context.Context, msg string, args ...interface{}) { func rebalanceLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "rebalance", msg, args...) logger.Event(ctx, "rebalance", msg, args...)
} }
func adminLogIf(ctx context.Context, err error, errKind ...interface{}) { func adminLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "admin", err, errKind...) logger.LogIf(ctx, "admin", err, errKind...)
} }
func authNLogIf(ctx context.Context, err error, errKind ...interface{}) { func authNLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "authN", err, errKind...) logger.LogIf(ctx, "authN", err, errKind...)
} }
func authZLogIf(ctx context.Context, err error, errKind ...interface{}) { func authZLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "authZ", err, errKind...) logger.LogIf(ctx, "authZ", err, errKind...)
} }
func peersLogIf(ctx context.Context, err error, errKind ...interface{}) { func peersLogIf(ctx context.Context, err error, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) { if !errors.Is(err, grid.ErrDisconnected) {
logger.LogIf(ctx, "peers", err, errKind...) logger.LogIf(ctx, "peers", err, errKind...)
} }
} }
func peersLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) { func peersLogAlwaysIf(ctx context.Context, err error, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) { if !errors.Is(err, grid.ErrDisconnected) {
logger.LogAlwaysIf(ctx, "peers", err, errKind...) logger.LogAlwaysIf(ctx, "peers", err, errKind...)
} }
} }
func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) { if !errors.Is(err, grid.ErrDisconnected) {
logger.LogOnceIf(ctx, "peers", err, id, errKind...) logger.LogOnceIf(ctx, "peers", err, id, errKind...)
} }
} }
func bugLogIf(ctx context.Context, err error, errKind ...interface{}) { func bugLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "internal", err, errKind...) logger.LogIf(ctx, "internal", err, errKind...)
} }
func healingLogIf(ctx context.Context, err error, errKind ...interface{}) { func healingLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "healing", err, errKind...) logger.LogIf(ctx, "healing", err, errKind...)
} }
func healingLogEvent(ctx context.Context, msg string, args ...interface{}) { func healingLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "healing", msg, args...) logger.Event(ctx, "healing", msg, args...)
} }
func healingLogOnceIf(ctx context.Context, err error, errKind ...interface{}) { func healingLogOnceIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "healing", err, errKind...) logger.LogIf(ctx, "healing", err, errKind...)
} }
func batchLogIf(ctx context.Context, err error, errKind ...interface{}) { func batchLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "batch", err, errKind...) logger.LogIf(ctx, "batch", err, errKind...)
} }
func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "batch", err, id, errKind...) logger.LogOnceIf(ctx, "batch", err, id, errKind...)
} }
func bootLogIf(ctx context.Context, err error, errKind ...interface{}) { func bootLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "bootstrap", err, errKind...) logger.LogIf(ctx, "bootstrap", err, errKind...)
} }
func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...) logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...)
} }
func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) { func dnsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "dns", err, errKind...) logger.LogIf(ctx, "dns", err, errKind...)
} }
func internalLogIf(ctx context.Context, err error, errKind ...interface{}) { func internalLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "internal", err, errKind...) logger.LogIf(ctx, "internal", err, errKind...)
} }
func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "internal", err, id, errKind...) logger.LogOnceIf(ctx, "internal", err, id, errKind...)
} }
func transitionLogIf(ctx context.Context, err error, errKind ...interface{}) { func transitionLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "transition", err, errKind...) logger.LogIf(ctx, "transition", err, errKind...)
} }
func configLogIf(ctx context.Context, err error, errKind ...interface{}) { func configLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "config", err, errKind...) logger.LogIf(ctx, "config", err, errKind...)
} }
func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "config", err, id, errKind...) logger.LogOnceIf(ctx, "config", err, id, errKind...)
} }
func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) { func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...) logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...)
} }
func scannerLogIf(ctx context.Context, err error, errKind ...interface{}) { func scannerLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "scanner", err, errKind...) logger.LogIf(ctx, "scanner", err, errKind...)
} }
func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "scanner", err, id, errKind...) logger.LogOnceIf(ctx, "scanner", err, id, errKind...)
} }
func ilmLogIf(ctx context.Context, err error, errKind ...interface{}) { func ilmLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "ilm", err, errKind...) logger.LogIf(ctx, "ilm", err, errKind...)
} }
func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "ilm", err, id, errKind...) logger.LogOnceIf(ctx, "ilm", err, id, errKind...)
} }
func encLogIf(ctx context.Context, err error, errKind ...interface{}) { func encLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "encryption", err, errKind...) logger.LogIf(ctx, "encryption", err, errKind...)
} }
func encLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func encLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "encryption", err, id, errKind...) logger.LogOnceIf(ctx, "encryption", err, id, errKind...)
} }
func storageLogIf(ctx context.Context, err error, errKind ...interface{}) { func storageLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "storage", err, errKind...) logger.LogIf(ctx, "storage", err, errKind...)
} }
func storageLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) { func storageLogAlwaysIf(ctx context.Context, err error, errKind ...any) {
logger.LogAlwaysIf(ctx, "storage", err, errKind...) logger.LogAlwaysIf(ctx, "storage", err, errKind...)
} }
func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "storage", err, id, errKind...) logger.LogOnceIf(ctx, "storage", err, id, errKind...)
} }
func decomLogIf(ctx context.Context, err error, errKind ...interface{}) { func decomLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "decom", err, errKind...) logger.LogIf(ctx, "decom", err, errKind...)
} }
func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "decom", err, id, errKind...) logger.LogOnceIf(ctx, "decom", err, id, errKind...)
} }
func decomLogEvent(ctx context.Context, msg string, args ...interface{}) { func decomLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "decom", msg, args...) logger.Event(ctx, "decom", msg, args...)
} }
func etcdLogIf(ctx context.Context, err error, errKind ...interface{}) { func etcdLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "etcd", err, errKind...) logger.LogIf(ctx, "etcd", err, errKind...)
} }
func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "etcd", err, id, errKind...) logger.LogOnceIf(ctx, "etcd", err, id, errKind...)
} }
func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) { func metricsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "metrics", err, errKind...) logger.LogIf(ctx, "metrics", err, errKind...)
} }
func s3LogIf(ctx context.Context, err error, errKind ...interface{}) { func s3LogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "s3", err, errKind...) logger.LogIf(ctx, "s3", err, errKind...)
} }
func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "sftp", err, id, errKind...) logger.LogOnceIf(ctx, "sftp", err, id, errKind...)
} }
func shutdownLogIf(ctx context.Context, err error, errKind ...interface{}) { func shutdownLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "shutdown", err, errKind...) logger.LogIf(ctx, "shutdown", err, errKind...)
} }
func stsLogIf(ctx context.Context, err error, errKind ...interface{}) { func stsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "sts", err, errKind...) logger.LogIf(ctx, "sts", err, errKind...)
} }
func tierLogIf(ctx context.Context, err error, errKind ...interface{}) { func tierLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "tier", err, errKind...) logger.LogIf(ctx, "tier", err, errKind...)
} }
func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) { func kmsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "kms", err, errKind...) logger.LogIf(ctx, "kms", err, errKind...)
} }
@ -220,11 +220,11 @@ func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
type KMSLogger struct{} type KMSLogger struct{}
// LogOnceIf is the implementation of LogOnceIf, accessible using the Logger interface // LogOnceIf is the implementation of LogOnceIf, accessible using the Logger interface
func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "kms", err, id, errKind...) logger.LogOnceIf(ctx, "kms", err, id, errKind...)
} }
// LogIf is the implementation of LogIf, accessible using the Logger interface // LogIf is the implementation of LogIf, accessible using the Logger interface
func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...interface{}) { func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "kms", err, errKind...) logger.LogIf(ctx, "kms", err, errKind...)
} }

View File

@ -20,6 +20,7 @@ package cmd
import ( import (
"context" "context"
"errors" "errors"
"maps"
"runtime/debug" "runtime/debug"
"sort" "sort"
"sync" "sync"
@ -70,7 +71,7 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache {
} }
} }
func (b *bucketMetacache) debugf(format string, data ...interface{}) { func (b *bucketMetacache) debugf(format string, data ...any) {
if serverDebugLog { if serverDebugLog {
console.Debugf(format+"\n", data...) console.Debugf(format+"\n", data...)
} }
@ -195,9 +196,7 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri
b.mu.RLock() b.mu.RLock()
defer b.mu.RUnlock() defer b.mu.RUnlock()
dst := make(map[string]metacache, len(b.caches)) dst := make(map[string]metacache, len(b.caches))
for k, v := range b.caches { maps.Copy(dst, b.caches)
dst[k] = v
}
// Copy indexes // Copy indexes
dst2 := make(map[string][]string, len(b.cachesRoot)) dst2 := make(map[string][]string, len(b.cachesRoot))
for k, v := range b.cachesRoot { for k, v := range b.cachesRoot {

View File

@ -33,7 +33,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
for i := range pathNames[:] { for i := range pathNames[:] {
pathNames[i] = fmt.Sprintf("prefix/%d", i) pathNames[i] = fmt.Sprintf("prefix/%d", i)
} }
for i := 0; i < elements; i++ { for i := range elements {
bm.findCache(listPathOptions{ bm.findCache(listPathOptions{
ID: mustGetUUID(), ID: mustGetUUID(),
Bucket: "", Bucket: "",
@ -49,8 +49,8 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
}) })
} }
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; b.Loop(); i++ {
bm.findCache(listPathOptions{ bm.findCache(listPathOptions{
ID: mustGetUUID(), ID: mustGetUUID(),
Bucket: "", Bucket: "",

View File

@ -633,7 +633,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
for testID, tt := range tests { for testID, tt := range tests {
rng := rand.New(rand.NewSource(0)) rng := rand.New(rand.NewSource(0))
// Run for a number of times, shuffling the input to ensure that output is consistent. // Run for a number of times, shuffling the input to ensure that output is consistent.
for i := 0; i < 10; i++ { for i := range 10 {
t.Run(fmt.Sprintf("test-%d-%s-run-%d", testID, tt.name, i), func(t *testing.T) { t.Run(fmt.Sprintf("test-%d-%s-run-%d", testID, tt.name, i), func(t *testing.T) {
if i > 0 { if i > 0 {
rng.Shuffle(len(tt.m), func(i, j int) { rng.Shuffle(len(tt.m), func(i, j int) {

View File

@ -38,8 +38,8 @@ func (o *listPathOptions) parseMarker() {
o.Marker = s[:start] o.Marker = s[:start]
end := strings.LastIndex(s, "]") end := strings.LastIndex(s, "]")
tag := strings.Trim(s[start:end], "[]") tag := strings.Trim(s[start:end], "[]")
tags := strings.Split(tag, ",") tags := strings.SplitSeq(tag, ",")
for _, tag := range tags { for tag := range tags {
kv := strings.Split(tag, ":") kv := strings.Split(tag, ":")
if len(kv) < 2 { if len(kv) < 2 {
continue continue

View File

@ -25,6 +25,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"maps"
"math/rand" "math/rand"
"strconv" "strconv"
"strings" "strings"
@ -162,13 +163,13 @@ func (o listPathOptions) newMetacache() metacache {
} }
} }
func (o *listPathOptions) debugf(format string, data ...interface{}) { func (o *listPathOptions) debugf(format string, data ...any) {
if serverDebugLog { if serverDebugLog {
console.Debugf(format+"\n", data...) console.Debugf(format+"\n", data...)
} }
} }
func (o *listPathOptions) debugln(data ...interface{}) { func (o *listPathOptions) debugln(data ...any) {
if serverDebugLog { if serverDebugLog {
console.Debugln(data...) console.Debugln(data...)
} }
@ -906,9 +907,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
fi := FileInfo{ fi := FileInfo{
Metadata: make(map[string]string, len(meta)), Metadata: make(map[string]string, len(meta)),
} }
for k, v := range meta { maps.Copy(fi.Metadata, meta)
fi.Metadata[k] = v
}
err := er.updateObjectMetaWithOpts(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks(), UpdateMetadataOpts{NoPersistence: true}) err := er.updateObjectMetaWithOpts(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks(), UpdateMetadataOpts{NoPersistence: true})
if err == nil { if err == nil {
break break

View File

@ -20,6 +20,7 @@ package cmd
import ( import (
"context" "context"
"fmt" "fmt"
"maps"
"math" "math"
"net/http" "net/http"
"runtime" "runtime"
@ -431,15 +432,9 @@ func (m *MetricV2) clone() MetricV2 {
VariableLabels: make(map[string]string, len(m.VariableLabels)), VariableLabels: make(map[string]string, len(m.VariableLabels)),
Histogram: make(map[string]uint64, len(m.Histogram)), Histogram: make(map[string]uint64, len(m.Histogram)),
} }
for k, v := range m.StaticLabels { maps.Copy(metric.StaticLabels, m.StaticLabels)
metric.StaticLabels[k] = v maps.Copy(metric.VariableLabels, m.VariableLabels)
} maps.Copy(metric.Histogram, m.Histogram)
for k, v := range m.VariableLabels {
metric.VariableLabels[k] = v
}
for k, v := range m.Histogram {
metric.Histogram[k] = v
}
return metric return metric
} }
@ -2492,10 +2487,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
"endpoint": ep, "endpoint": ep,
}, },
} }
dwntime := currDowntime dwntime := max(health.offlineDuration, currDowntime)
if health.offlineDuration > currDowntime {
dwntime = health.offlineDuration
}
downtimeDuration.Value = float64(dwntime / time.Second) downtimeDuration.Value = float64(dwntime / time.Second)
ml = append(ml, downtimeDuration) ml = append(ml, downtimeDuration)
} }
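The built-in `max` (Go 1.21) returns the larger of its arguments, so the assign-then-conditionally-overwrite pattern on the left collapses into a single expression. A sketch with illustrative durations:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	currDowntime := 45 * time.Second
	offlineDuration := 90 * time.Second

	// Equivalent to:
	//   dwntime := currDowntime
	//   if offlineDuration > currDowntime { dwntime = offlineDuration }
	dwntime := max(offlineDuration, currDowntime)

	fmt.Println(float64(dwntime / time.Second)) // 90
}
```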

View File

@ -35,7 +35,7 @@ import (
type promLogger struct{} type promLogger struct{}
func (p promLogger) Println(v ...interface{}) { func (p promLogger) Println(v ...any) {
metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v)) metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v))
} }

View File

@ -45,7 +45,7 @@ func TestNSLockRace(t *testing.T) {
ctx := t.Context() ctx := t.Context()
for i := 0; i < 10000; i++ { for i := range 10000 {
nsLk := newNSLock(false) nsLk := newNSLock(false)
// lk1; ref=1 // lk1; ref=1

View File

@ -201,7 +201,6 @@ func TestCheckLocalServerAddr(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
err := CheckLocalServerAddr(testCase.serverAddr) err := CheckLocalServerAddr(testCase.serverAddr)
switch { switch {
@ -273,7 +272,6 @@ func TestSameLocalAddrs(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2) sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2)
if testCase.expectedErr != nil && err == nil { if testCase.expectedErr != nil && err == nil {

View File

@ -155,7 +155,6 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) []NotificationPeerErr { func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -170,7 +169,6 @@ func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string)
func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) []NotificationPeerErr { func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -185,7 +183,6 @@ func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) [
func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr { func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -200,7 +197,6 @@ func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup s
func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []NotificationPeerErr { func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -215,7 +211,6 @@ func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []
func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp bool) []NotificationPeerErr { func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp bool) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -230,7 +225,6 @@ func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp
func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []NotificationPeerErr { func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -245,7 +239,6 @@ func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []Notif
func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr { func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -260,7 +253,6 @@ func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey
func (sys *NotificationSys) LoadServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr { func (sys *NotificationSys) LoadServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
return errPeerNotReachable return errPeerNotReachable
@ -276,7 +268,6 @@ func (sys *NotificationSys) BackgroundHealStatus(ctx context.Context) ([]madmin.
ng := WithNPeers(len(sys.peerClients)) ng := WithNPeers(len(sys.peerClients))
states := make([]madmin.BgHealState, len(sys.peerClients)) states := make([]madmin.BgHealState, len(sys.peerClients))
for idx, client := range sys.peerClients { for idx, client := range sys.peerClients {
idx := idx
client := client client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
@ -485,7 +476,6 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
locksResp := make([]*PeerLocks, len(sys.peerClients)) locksResp := make([]*PeerLocks, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients { for index, client := range sys.peerClients {
index := index
client := client client := client
g.Go(func() error { g.Go(func() error {
if client == nil { if client == nil {
@ -570,7 +560,6 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
replicationStats := make([]BucketStatsMap, len(sys.peerClients)) replicationStats := make([]BucketStatsMap, len(sys.peerClients))
for index, client := range sys.peerClients { for index, client := range sys.peerClients {
index := index
client := client client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
@ -612,7 +601,6 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
bucketStats := make([]BucketStats, len(sys.peerClients)) bucketStats := make([]BucketStats, len(sys.peerClients))
for index, client := range sys.peerClients { for index, client := range sys.peerClients {
index := index
client := client client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
@ -647,7 +635,6 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri
ng := WithNPeers(len(sys.peerClients)).WithRetries(1) ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
siteStats := make([]SRMetricsSummary, len(sys.peerClients)) siteStats := make([]SRMetricsSummary, len(sys.peerClients))
for index, client := range sys.peerClients { for index, client := range sys.peerClients {
index := index
client := client client := client
ng.Go(ctx, func() error { ng.Go(ctx, func() error {
if client == nil { if client == nil {
@ -926,7 +913,6 @@ func (sys *NotificationSys) GetResourceMetrics(ctx context.Context) <-chan Metri
g := errgroup.WithNErrs(len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
for index := range sys.peerClients { for index := range sys.peerClients {
index := index
g.Go(func() error { g.Go(func() error {
if sys.peerClients[index] == nil { if sys.peerClients[index] == nil {
return errPeerNotReachable return errPeerNotReachable
@ -1302,7 +1288,6 @@ func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan MetricV
g := errgroup.WithNErrs(len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
for index := range sys.peerClients { for index := range sys.peerClients {
index := index
g.Go(func() error { g.Go(func() error {
if sys.peerClients[index] == nil { if sys.peerClients[index] == nil {
return errPeerNotReachable return errPeerNotReachable
@ -1323,7 +1308,6 @@ func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan Metric
g := errgroup.WithNErrs(len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
for index := range sys.peerClients { for index := range sys.peerClients {
index := index
g.Go(func() error { g.Go(func() error {
if sys.peerClients[index] == nil { if sys.peerClients[index] == nil {
return errPeerNotReachable return errPeerNotReachable

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"io" "io"
"maps"
"math" "math"
"net/http" "net/http"
"time" "time"
@ -290,9 +291,7 @@ func (o *ObjectInfo) Clone() (cinfo ObjectInfo) {
VersionPurgeStatusInternal: o.VersionPurgeStatusInternal, VersionPurgeStatusInternal: o.VersionPurgeStatusInternal,
} }
cinfo.UserDefined = make(map[string]string, len(o.UserDefined)) cinfo.UserDefined = make(map[string]string, len(o.UserDefined))
for k, v := range o.UserDefined { maps.Copy(cinfo.UserDefined, o.UserDefined)
cinfo.UserDefined[k] = v
}
return cinfo return cinfo
} }

View File

@ -156,7 +156,6 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te
} }
for i, testCase := range testCases { for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
var err error var err error
var resultL ListObjectsInfo var resultL ListObjectsInfo
@ -944,7 +943,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v
} }
for i, testCase := range testCases { for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys) t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys)
result, err := obj.ListObjects(t.Context(), testCase.bucketName, result, err := obj.ListObjects(t.Context(), testCase.bucketName,
@ -1676,7 +1674,6 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
} }
for i, testCase := range testCases { for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
result, err := obj.ListObjectVersions(t.Context(), testCase.bucketName, result, err := obj.ListObjectVersions(t.Context(), testCase.bucketName,
testCase.prefix, testCase.marker, "", testCase.delimiter, int(testCase.maxKeys)) testCase.prefix, testCase.marker, "", testCase.delimiter, int(testCase.maxKeys))
@ -1827,7 +1824,6 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr
} }
for i, testCase := range testCases { for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
var foundObjects []ObjectInfo var foundObjects []ObjectInfo
var foundPrefixes []string var foundPrefixes []string
@ -1914,7 +1910,7 @@ func BenchmarkListObjects(b *testing.B) {
} }
// Insert objects to be listed and benchmarked later. // Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ { for i := range 20000 {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(b.Context(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{}) _, err = obj.PutObject(b.Context(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{})
if err != nil { if err != nil {
@ -1922,10 +1918,8 @@ func BenchmarkListObjects(b *testing.B) {
} }
} }
b.ResetTimer()
// List the buckets over and over and over. // List the buckets over and over and over.
for i := 0; i < b.N; i++ { for b.Loop() {
_, err = obj.ListObjects(b.Context(), bucket, "", "obj9000", "", -1) _, err = obj.ListObjects(b.Context(), bucket, "", "obj9000", "", -1)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)

View File

@ -369,7 +369,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
for i := 0; i < 3; i++ { for range 3 {
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times. // Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
// Used to test the listing for the case of multiple uploadID's for a given object. // Used to test the listing for the case of multiple uploadID's for a given object.
res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts) res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts)
@ -392,7 +392,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
} }
// Initiate Multipart Upload on bucketNames[2]. // Initiate Multipart Upload on bucketNames[2].
// Used to test the listing for the case of multiple objects for a given bucket. // Used to test the listing for the case of multiple objects for a given bucket.
for i := 0; i < 6; i++ { for i := range 6 {
res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts) res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
@ -2167,7 +2167,6 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
} }
for _, testCase := range testCases { for _, testCase := range testCases {
testCase := testCase
t.(*testing.T).Run("", func(t *testing.T) { t.(*testing.T).Run("", func(t *testing.T) {
opts = ObjectOptions{} opts = ObjectOptions{}
actualResult, actualErr := obj.CompleteMultipartUpload(t.Context(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{}) actualResult, actualErr := obj.CompleteMultipartUpload(t.Context(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})

View File

@ -226,7 +226,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) { func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) {
attributes = make(map[string]struct{}) attributes = make(map[string]struct{})
for _, headerVal := range h.Values(xhttp.AmzObjectAttributes) { for _, headerVal := range h.Values(xhttp.AmzObjectAttributes) {
for _, v := range strings.Split(strings.TrimSpace(headerVal), ",") { for v := range strings.SplitSeq(strings.TrimSpace(headerVal), ",") {
if v != "" { if v != "" {
attributes[v] = struct{}{} attributes[v] = struct{}{}
} }

View File

@ -61,14 +61,14 @@ func benchmark(b *testing.B, data []string) {
b.Run("concat naive", func(b *testing.B) { b.Run("concat naive", func(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
concatNaive(data...) concatNaive(data...)
} }
}) })
b.Run("concat fast", func(b *testing.B) { b.Run("concat fast", func(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
concat(data...) concat(data...)
} }
}) })
@ -77,7 +77,7 @@ func benchmark(b *testing.B, data []string) {
func BenchmarkConcatImplementation(b *testing.B) { func BenchmarkConcatImplementation(b *testing.B) {
data := make([]string, 2) data := make([]string, 2)
rng := rand.New(rand.NewSource(0)) rng := rand.New(rand.NewSource(0))
for i := 0; i < 2; i++ { for i := range 2 {
var tmp [16]byte var tmp [16]byte
rng.Read(tmp[:]) rng.Read(tmp[:])
data[i] = hex.EncodeToString(tmp[:]) data[i] = hex.EncodeToString(tmp[:])
@ -91,7 +91,7 @@ func BenchmarkPathJoinOld(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
pathJoinOld("volume", "path/path/path") pathJoinOld("volume", "path/path/path")
} }
}) })
@ -102,7 +102,7 @@ func BenchmarkPathJoin(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
pathJoin("volume", "path/path/path") pathJoin("volume", "path/path/path")
} }
}) })

Some files were not shown because too many files have changed in this diff.