Run modernize (#21546)

Ran `go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...` to apply the fixes.

Ran `go generate ./...` afterwards to keep generated code up to date.
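For context, the diff below is dominated by a handful of mechanical rewrites. The following sketch (a hypothetical snippet, not code from this repository) shows the before/after shape of the most common ones: `interface{}` to `any`, counted loops to `for range n`, manual map copies to `maps.Copy`, and `[]byte(fmt.Sprintf(...))` to `fmt.Appendf(nil, ...)`:

```go
package main

import (
	"fmt"
	"maps"
)

// before: the pre-modernize style seen on the removed lines below.
func before(src map[string]interface{}) ([]byte, map[string]interface{}) {
	dst := make(map[string]interface{}, len(src))
	for k, v := range src {
		dst[k] = v
	}
	sum := 0
	for i := 0; i < 3; i++ {
		sum += i
	}
	return []byte(fmt.Sprintf("sum=%d", sum)), dst
}

// after: what modernize -fix rewrites it to.
func after(src map[string]any) ([]byte, map[string]any) {
	dst := make(map[string]any, len(src))
	maps.Copy(dst, src)
	sum := 0
	for i := range 3 {
		sum += i
	}
	return fmt.Appendf(nil, "sum=%d", sum), dst
}

func main() {
	b, _ := before(map[string]interface{}{"a": 1})
	a, _ := after(map[string]any{"a": 1})
	fmt.Println(string(b), string(a))
}
```

The other rewrites in the diff follow the same pattern: ranging over `strings.Split` becomes `strings.SplitSeq`, hand-written contains loops become `slices.Contains`, clamping if-blocks become the `min`/`max` builtins, benchmark loops use `b.Loop()`, and the redundant `x := x` loop-variable captures are dropped (unnecessary since Go 1.22).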
Klaus Post 2025-08-29 04:39:48 +02:00 committed by GitHub
parent 3b7cb6512c
commit f0b91e5504
238 changed files with 913 additions and 1257 deletions

View File

@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
}
}
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
data, err := io.ReadAll(body)
if err != nil {
return SRError{

View File

@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
// Create a policy policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
]
}
]
}`, bucket))
}`, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
userCount := 50
accessKeys := make([]string, userCount)
secretKeys := make([]string, userCount)
for i := 0; i < userCount; i++ {
for i := range userCount {
accessKey, secretKey := mustGenerateCredentials(c)
err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
if err != nil {
@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
}
g := errgroup.Group{}
for i := 0; i < userCount; i++ {
for i := range userCount {
g.Go(func(i int) func() error {
return func() error {
uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")

View File

@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"os"
"slices"
@ -157,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for k, v := range ldapUsers {
allCredentials[k] = v
}
maps.Copy(allCredentials, ldapUsers)
// Marshal the response
data, err := json.Marshal(allCredentials)
@ -2949,7 +2948,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
name: createReq.Name,
description: description,
expiration: createReq.Expiration,
claims: make(map[string]interface{}),
claims: make(map[string]any),
}
condValues := getConditionValues(r, "", cred)

View File

@ -332,7 +332,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
// 2.2 create and associate policy to user
policy := "mypolicy-test-user-update"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -355,7 +355,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -562,7 +562,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
// 1. Create a policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -585,7 +585,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -680,7 +680,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
c.Fatalf("bucket creat error: %v", err)
}
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -703,7 +703,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
// Check that default policies can be overwritten.
err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@ -739,7 +739,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
}
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -762,7 +762,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -911,7 +911,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -934,7 +934,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -995,7 +995,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -1026,7 +1026,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -1093,7 +1093,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
// Create policy, user and associate policy
policy := "mypolicy"
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -1116,7 +1116,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
]
}
]
}`, bucket, bucket))
}`, bucket, bucket)
err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
if err != nil {
c.Fatalf("policy add error: %v", err)
@ -1367,7 +1367,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -1381,7 +1381,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
]
}
]
}`, bucket))
}`, bucket)
cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
@ -1558,7 +1558,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
c.Helper()
versions := []string{}
for i := 0; i < 5; i++ {
for range 5 {
ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
if err != nil {
c.Fatalf("upload did not succeed got %#v", err)
@ -1627,7 +1627,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
svcAK, svcSK := mustGenerateCredentials(c)
// This policy does not allow listing objects.
policyBytes := []byte(fmt.Sprintf(`{
policyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -1641,7 +1641,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
]
}
]
}`, bucket))
}`, bucket)
cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
Policy: policyBytes,
TargetUser: accessKey,
@ -1655,7 +1655,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
c.mustNotListObjects(ctx, svcClient, bucket)
// This policy allows listing objects.
newPolicyBytes := []byte(fmt.Sprintf(`{
newPolicyBytes := fmt.Appendf(nil, `{
"Version": "2012-10-17",
"Statement": [
{
@ -1668,7 +1668,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
]
}
]
}`, bucket))
}`, bucket)
err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
NewPolicy: newPolicyBytes,
})

View File

@ -954,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
var args dsync.LockArgs
var lockers []dsync.NetLocker
for _, path := range strings.Split(vars["paths"], ",") {
for path := range strings.SplitSeq(vars["paths"], ",") {
if path == "" {
continue
}
@ -1193,7 +1193,7 @@ type dummyFileInfo struct {
mode os.FileMode
modTime time.Time
isDir bool
sys interface{}
sys any
}
func (f dummyFileInfo) Name() string { return f.name }
@ -1201,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size }
func (f dummyFileInfo) Mode() os.FileMode { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool { return f.isDir }
func (f dummyFileInfo) Sys() interface{} { return f.sys }
func (f dummyFileInfo) Sys() any { return f.sys }
// DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
// ----------

View File

@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
func TestTopLockEntries(t *testing.T) {
locksHeld := make(map[string][]lockRequesterInfo)
var owners []string
for i := 0; i < 4; i++ {
for i := range 4 {
owners = append(owners, fmt.Sprintf("node-%d", i))
}
@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
// request UID, but 10 different resource names associated with it.
var lris []lockRequesterInfo
uuid := mustGetUUID()
for i := 0; i < 10; i++ {
for i := range 10 {
resource := fmt.Sprintf("bucket/delete-object-%d", i)
lri := lockRequesterInfo{
Name: resource,
@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
}
// Add a few concurrent read locks to the mix
for i := 0; i < 50; i++ {
for i := range 50 {
resource := fmt.Sprintf("bucket/get-object-%d", i)
lri := lockRequesterInfo{
Name: resource,

View File

@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
"maps"
"net/http"
"sort"
"sync"
@ -520,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
maps.Copy(retMap, h.scannedItemsMap)
return retMap
}
@ -534,9 +533,7 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}
maps.Copy(retMap, h.healedItemsMap)
return retMap
}
@ -549,9 +546,7 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
maps.Copy(retMap, h.healFailedItemsMap)
return retMap
}

View File

@ -65,7 +65,7 @@ func setCommonHeaders(w http.ResponseWriter) {
}
// Encodes the response headers into XML format.
func encodeResponse(response interface{}) []byte {
func encodeResponse(response any) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
@ -83,7 +83,7 @@ func encodeResponse(response interface{}) []byte {
// Do not use this function for anything other than ListObjects()
// variants, please open a github discussion if you wish to use
// this in other places.
func encodeResponseList(response interface{}) []byte {
func encodeResponseList(response any) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
@ -94,7 +94,7 @@ func encodeResponseList(response interface{}) []byte {
}
// Encodes the response headers into JSON format.
func encodeResponseJSON(response interface{}) []byte {
func encodeResponseJSON(response any) []byte {
var bytesBuffer bytes.Buffer
e := json.NewEncoder(&bytesBuffer)
e.Encode(response)

View File

@ -100,7 +100,6 @@ func TestObjectLocation(t *testing.T) {
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {

View File

@ -216,7 +216,7 @@ func getSessionToken(r *http.Request) (token string) {
// Fetch claims in the security token returned by the client, doesn't return
// errors - upon errors the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
func mustGetClaimsFromToken(r *http.Request) map[string]any {
claims, _ := getClaimsFromToken(getSessionToken(r))
return claims
}
@ -266,7 +266,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error)
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(token string) (map[string]interface{}, error) {
func getClaimsFromToken(token string) (map[string]any, error) {
jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
if err != nil {
return nil, err
@ -275,7 +275,7 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
}
// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
// x-amz-security-token is not allowed for anonymous access.

View File

@ -24,6 +24,7 @@ import (
"fmt"
"io"
"os"
"slices"
"sort"
"strings"
"sync"
@ -269,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
func (h *healingTracker) isHealed(bucket string) bool {
h.mu.RLock()
defer h.mu.RUnlock()
for _, v := range h.HealedBuckets {
if v == bucket {
return true
}
}
return false
return slices.Contains(h.HealedBuckets, bucket)
}
// resume will reset progress to the numbers at the start of the bucket.

View File

@ -25,6 +25,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"net/url"
@ -574,9 +575,7 @@ func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
}
for k, v := range objInfo.UserMetadata {
oi.UserDefined[k] = v
}
maps.Copy(oi.UserDefined, objInfo.UserMetadata)
return oi
}

View File

@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
type BatchJobSize int64
// UnmarshalYAML to parse humanized byte values
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
var batchExpireSz string
err := unmarshal(&batchExpireSz)
if err != nil {

View File

@ -21,6 +21,7 @@ import (
"context"
"encoding/base64"
"fmt"
"maps"
"math/rand"
"net/http"
"runtime"
@ -110,9 +111,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
}
}
e.kmsContext = kms.Context{}
for k, v := range ctx {
e.kmsContext[k] = v
}
maps.Copy(e.kmsContext, ctx)
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
return err
@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
// Since we are rotating the keys, make sure to update the metadata.
oi.metadataOnly = true
oi.keyRotation = true
for k, v := range encMetadata {
oi.UserDefined[k] = v
}
maps.Copy(oi.UserDefined, encMetadata)
if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
VersionID: oi.VersionID,
}, ObjectOptions{

View File

@ -51,8 +51,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
for i := 0; b.Loop(); i++ {
// insert the object.
objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
for i := 0; b.Loop(); i++ {
// insert the object.
totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
for j := 0; j < totalPartsNR; j++ {
for j := range totalPartsNR {
if j < totalPartsNR-1 {
textPartData = textData[j*partSize : (j+1)*partSize-1]
} else {

View File

@ -154,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
for index := range bucketsToBeUpdatedSlice {
index := index
g.Go(func() error {
return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
}, index)
@ -1387,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
// Set the correct hex md5sum for the fan-out stream.
fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))
concurrentSize := 100
if runtime.GOMAXPROCS(0) < concurrentSize {
concurrentSize = runtime.GOMAXPROCS(0)
}
concurrentSize := min(runtime.GOMAXPROCS(0), 100)
fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
eventArgsList := make([]eventArgs, 0, len(fanOutEntries))

View File

@ -657,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
sha256sum := ""
var objectNames []string
for i := 0; i < 10; i++ {
for i := range 10 {
contentBytes := []byte("hello")
objectName := "test-object-" + strconv.Itoa(i)
if i == 0 {
@ -687,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// The following block will create a bucket policy with delete object to 'public/*'. This is
// to test a mixed response of a successful & failure while deleting objects in a single request
policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
rec := httptest.NewRecorder()
req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
credentials.AccessKey, credentials.SecretKey, nil)

View File

@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"strconv"
"strings"
@ -959,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
UserDefined: meta,
}
}
for k, v := range objInfo.UserDefined {
meta[k] = v
}
maps.Copy(meta, objInfo.UserDefined)
if len(objInfo.UserTags) != 0 {
meta[xhttp.AmzObjectTagging] = objInfo.UserTags
}

View File

@ -472,7 +472,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
return meta, reloaded, nil
}
val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
@ -511,7 +511,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
g := errgroup.WithNErrs(len(buckets))
bucketMetas := make([]BucketMetadata, len(buckets))
for index := range buckets {
index := index
g.Go(func() error {
// Sleep and stagger to avoid blocked CPU and thundering
// herd upon start up sequence.

View File

@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
var wg sync.WaitGroup
var mu sync.Mutex
wg.Add(n)
for i := 0; i < n; i++ {
for range n {
go func() {
defer wg.Done()
// Sync start.
@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Test case - 1.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Expecting StatusBadRequest (400).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
policyLen: maxBucketPolicySize + 1,
accessKey: credentials.AccessKey,
@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// Expecting the HTTP response status to be StatusLengthRequired (411).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
policyLen: 0,
accessKey: credentials.AccessKey,
@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// checkBucketPolicyResources should fail.
{
bucketName: bucketName1,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 404 StatusNotFound
{
bucketName: "non-existent-bucket",
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 404 StatusNotFound
{
bucketName: ".invalid-bucket",
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
// should result in 400 StatusBadRequest.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
accessKey: credentials.AccessKey,

View File

@ -19,6 +19,7 @@ package cmd
import (
"encoding/json"
"maps"
"net/http"
"net/url"
"strconv"
@ -187,9 +188,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
}
cloneURLValues := make(url.Values, len(r.Form))
for k, v := range r.Form {
cloneURLValues[k] = v
}
maps.Copy(cloneURLValues, r.Form)
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
@ -224,7 +223,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
// Add groups claim which could be a list. This will ensure that the claim
// `jwt:groups` works.
if grpsVal, ok := claims["groups"]; ok {
if grpsIs, ok := grpsVal.([]interface{}); ok {
if grpsIs, ok := grpsVal.([]any); ok {
grps := []string{}
for _, gI := range grpsIs {
if g, ok := gI.(string); ok {

View File

@ -21,6 +21,7 @@ import (
"bytes"
"context"
"fmt"
"maps"
"net/http"
"net/url"
"regexp"
@ -311,7 +312,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
if len(s) == 0 {
return
}
for _, p := range strings.Split(s, ",") {
for p := range strings.SplitSeq(s, ",") {
if p == "" {
continue
}
@ -735,9 +736,7 @@ type BucketReplicationResyncStatus struct {
func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
m = make(map[string]TargetReplicationResyncStatus)
for arn, st := range rs.TargetsMap {
m[arn] = st
}
maps.Copy(m, rs.TargetsMap)
return
}

View File

@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"net/url"
@ -803,9 +804,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
} else {
cs, mp := getCRCMeta(objInfo, 0, nil)
// Set object checksum.
for k, v := range cs {
meta[k] = v
}
maps.Copy(meta, cs)
isMP = mp
if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
// For objects where checksum is full object, it will be the same.
@ -969,9 +968,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
t, _ := tags.ParseObjectTags(oi1.UserTags)
oi2Map := make(map[string]string)
for k, v := range oi2.UserTags {
oi2Map[k] = v
}
maps.Copy(oi2Map, oi2.UserTags)
if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
return replicateMetadata
}
@ -1770,9 +1767,7 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
}
if !copied {
dst = make(map[string]string, len(metadata))
for k, v := range metadata {
dst[k] = v
}
maps.Copy(dst, metadata)
copied = true
}
delete(dst, key)
@ -2954,7 +2949,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
}()
var wg sync.WaitGroup
for i := 0; i < resyncParallelRoutines; i++ {
for i := range resyncParallelRoutines {
wg.Add(1)
workers[i] = make(chan ReplicateObjectInfo, 100)
i := i
@ -3063,7 +3058,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
workers[h%uint64(resyncParallelRoutines)] <- roi
}
}
for i := 0; i < resyncParallelRoutines; i++ {
for i := range resyncParallelRoutines {
xioutil.SafeClose(workers[i])
}
wg.Wait()
@ -3193,11 +3188,9 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []stri
<-ctx.Done()
return
}
duration := time.Duration(r.Float64() * float64(time.Minute))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
duration := max(time.Duration(r.Float64()*float64(time.Minute)),
// Make sure to sleep at least a second to avoid high CPU ticks.
time.Second)
time.Sleep(duration)
}
}

View File

@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"maps"
"math"
"sync/atomic"
"time"
@ -221,9 +222,7 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
}
if s.Failed.ErrCounts == nil {
s.Failed.ErrCounts = make(map[string]int)
for k, v := range st.Failed.ErrCounts {
s.Failed.ErrCounts[k] = v
}
maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts)
}
c.Stats[arn] = &s
}

View File

@ -20,6 +20,7 @@ package cmd
import (
"context"
"errors"
"maps"
"net/url"
"sync"
"time"
@ -236,9 +237,7 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth {
sys.hMutex.RLock()
defer sys.hMutex.RUnlock()
m := make(map[string]epHealth, len(sys.hc))
for k, v := range sys.hc {
m[k] = v
}
maps.Copy(m, sys.hc)
return m
}

View File

@ -57,11 +57,9 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
// callhome running on a different node.
// sleep for some time and try again.
duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())),
// Make sure to sleep at least a second to avoid high CPU ticks.
time.Second)
time.Sleep(duration)
}
}()

View File

@ -105,7 +105,7 @@ func init() {
gob.Register(madmin.TimeInfo{})
gob.Register(madmin.XFSErrorConfigs{})
gob.Register(map[string]string{})
gob.Register(map[string]interface{}{})
gob.Register(map[string]any{})
// All minio-go and madmin-go API operations shall be performed only once,
// another way to look at this is we are turning off retries.
@ -258,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) {
if !serverDebugLog {
// Disable console logging if server debug log is not enabled
noLog := func(string, ...interface{}) {}
noLog := func(string, ...any) {}
consoleapi.LogInfo = noLog
consoleapi.LogError = noLog
@ -761,7 +761,7 @@ func serverHandleEnvVars() {
domains := env.Get(config.EnvDomain, "")
if len(domains) != 0 {
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
for domainName := range strings.SplitSeq(domains, config.ValueSeparator) {
if _, ok := dns2.IsDomainName(domainName); !ok {
logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
"Invalid MINIO_DOMAIN value in environment variable")
@ -1059,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
return time.Time{}, false
}
func (a bgCtx) Value(key interface{}) interface{} {
func (a bgCtx) Value(key any) any {
return a.parent.Value(key)
}

View File

@ -43,7 +43,6 @@ func Test_readFromSecret(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
if err != nil {
@ -155,7 +154,6 @@ MINIO_ROOT_PASSWORD=minio123`,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
if err != nil {

View File

@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"strings"
"sync"
@ -78,12 +79,8 @@ func initHelp() {
config.BatchSubSys: batch.DefaultKVS,
config.BrowserSubSys: browser.DefaultKVS,
}
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v
}
for k, v := range lambda.DefaultLambdaKVS {
kvs[k] = v
}
maps.Copy(kvs, notify.DefaultNotificationKVS)
maps.Copy(kvs, lambda.DefaultLambdaKVS)
if globalIsErasure {
kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
kvs[config.HealSubSys] = heal.DefaultKVS

View File

@ -38,12 +38,12 @@ import (
)
// Save config file to corresponding backend
func Save(configFile string, data interface{}) error {
func Save(configFile string, data any) error {
return quick.SaveConfig(data, configFile, globalEtcdClient)
}
// Load config from backend
func Load(configFile string, data interface{}) (quick.Config, error) {
func Load(configFile string, data any) (quick.Config, error) {
return quick.LoadConfig(configFile, globalEtcdClient, data)
}

View File

@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
return saveConfig(ctx, objAPI, historyFile, kv)
}
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error {
data, err := json.Marshal(cfg)
if err != nil {
return err

View File

@ -101,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
lastN = make([]log.Info, last)
sys.RLock()
sys.logBuf.Do(func(p interface{}) {
sys.logBuf.Do(func(p any) {
if p != nil {
lg, ok := p.(log.Info)
if ok && lg.SendLog(node, logKind) {
@ -155,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats {
// Content returns the console stdout log
func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
sys.RLock()
sys.logBuf.Do(func(p interface{}) {
sys.logBuf.Do(func(p any) {
if p != nil {
lg, ok := p.(log.Info)
if ok {
@ -181,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType {
// Send log message 'e' to console and publish to console
// log pubsub system
func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error {
func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error {
var lg log.Info
switch e := entry.(type) {
case log.Entry:

View File

@ -198,7 +198,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p
func (p *scannerMetrics) getCurrentPaths() []string {
var res []string
prefix := globalLocalNodeName + "/"
p.currentPaths.Range(func(key, value interface{}) bool {
p.currentPaths.Range(func(key, value any) bool {
// We are a bit paranoid, but better miss an entry than crash.
name, ok := key.(string)
if !ok {
@ -221,7 +221,7 @@ func (p *scannerMetrics) getCurrentPaths() []string {
// (since this is concurrent it may not be 100% reliable)
func (p *scannerMetrics) activeDrives() int {
var i int
p.currentPaths.Range(func(k, v interface{}) bool {
p.currentPaths.Range(func(k, v any) bool {
i++
return true
})
@ -299,7 +299,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
m.CollectedAt = time.Now()
m.ActivePaths = p.getCurrentPaths()
m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
for i := scannerMetric(0); i < scannerMetricLast; i++ {
for i := range scannerMetricLast {
if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
m.LifeTimeOps[i.String()] = n
}
@ -309,7 +309,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
}
m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
for i := range scannerMetricLastRealtime {
lm := p.lastMinute(i)
if lm.N > 0 {
m.LastMinute.Actions[i.String()] = lm.asTimedAction()

View File

@ -78,11 +78,9 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
// Run the data scanner in a loop
for {
runDataScanner(ctx, objAPI)
duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())),
// Make sure to sleep at least a second to avoid high CPU ticks.
time.Second)
time.Sleep(duration)
}
}()

View File

@ -127,7 +127,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
v2 uuid-2 modTime -3m
v1 uuid-1 modTime -4m
*/
for i := 0; i < 5; i++ {
for i := range 5 {
fivs[i] = FileInfo{
Volume: bucket,
Name: obj,

View File

@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"path"
@ -99,9 +100,7 @@ func (ats *allTierStats) clone() *allTierStats {
}
dst := *ats
dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
for tier, st := range ats.Tiers {
dst.Tiers[tier] = st
}
maps.Copy(dst.Tiers, ats.Tiers)
return &dst
}
@ -347,9 +346,7 @@ func (e dataUsageEntry) clone() dataUsageEntry {
// We operate on a copy from the receiver.
if e.Children != nil {
ch := make(dataUsageHashMap, len(e.Children))
for k, v := range e.Children {
ch[k] = v
}
maps.Copy(ch, e.Children)
e.Children = ch
}

View File

@ -179,7 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
t.Fatal(err)
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
for range dataUsageUpdateDirCycles {
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
@ -428,7 +428,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
t.Fatal(err)
}
// Changed dir must be picked up in this many cycles.
for i := 0; i < dataUsageUpdateDirCycles; i++ {
for range dataUsageUpdateDirCycles {
got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
got.Info.NextCycle++
if err != nil {
@ -526,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
// generateUsageTestFiles create nFolders * nFiles files of size bytes each.
func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
pl := make([]byte, size)
for i := 0; i < nFolders; i++ {
for i := range nFolders {
name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
if err != nil {
t.Fatal(err)
}
for j := 0; j < nFiles; j++ {
for j := range nFiles {
name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
err = os.WriteFile(name, pl, os.ModePerm)
if err != nil {
@ -618,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
}
// equalAsJSON returns whether the values are equal when encoded as JSON.
func equalAsJSON(a, b interface{}) bool {
func equalAsJSON(a, b any) bool {
aj, err := json.Marshal(a)
if err != nil {
panic(err)

View File

@ -129,12 +129,9 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
if failPct > dynamicTimeoutIncreaseThresholdPct {
// We are hitting the timeout too often, so increase the timeout by 25%
timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
// Set upper cap.
if timeout > int64(maxDynamicTimeout) {
timeout = int64(maxDynamicTimeout)
}
timeout := min(
// Set upper cap.
atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout))
// Safety, shouldn't happen
if timeout < dt.minimum {
timeout = dt.minimum

View File

@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
timeout.LogFailure()
}
@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
timeout.LogFailure()
}
adjusted := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
timeout.LogFailure()
}
@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
timeout.LogSuccess(20 * time.Second)
}
@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
initial := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
timeout.LogSuccess(20 * time.Second)
}
adjusted := timeout.Timeout()
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
timeout.LogSuccess(20 * time.Second)
}
@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
initial := timeout.Timeout()
const successTimeout = 20 * time.Second
for l := 0; l < 100; l++ {
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range 100 {
for range dynamicTimeoutLogSize {
timeout.LogSuccess(successTimeout)
}
}
@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
rng := rand.New(rand.NewSource(int64(i)))
go func() {
defer wg.Done()
for i := 0; i < 100; i++ {
for j := 0; j < 100; j++ {
for range 100 {
for range 100 {
timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
}
to := timeout.Timeout()
@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
initial := timeout.Timeout()
const successTimeout = 20 * time.Second
for l := 0; l < 100; l++ {
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range 100 {
for range dynamicTimeoutLogSize {
timeout.LogSuccess(successTimeout)
}
}
@ -166,13 +166,9 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
const successTimeout = 20 * time.Second
for i := 0; i < dynamicTimeoutLogSize; i++ {
for range dynamicTimeoutLogSize {
rnd := f()
duration := time.Duration(float64(successTimeout) * rnd)
if duration < 100*time.Millisecond {
duration = 100 * time.Millisecond
}
duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond)
if duration >= time.Minute {
timeout.LogFailure()
} else {
@ -188,7 +184,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
initial := timeout.Timeout()
for try := 0; try < 10; try++ {
for range 10 {
testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
}
@ -205,7 +201,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
initial := timeout.Timeout()
for try := 0; try < 10; try++ {
for range 10 {
testDynamicTimeoutAdjust(t, timeout, func() float64 {
return 1.0 + rand.NormFloat64()
})

View File

@ -29,6 +29,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"path"
"strconv"
@ -117,10 +118,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
names = make([]string, 0, BatchSize)
)
for len(objects) > 0 {
N := BatchSize
if len(objects) < BatchSize {
N = len(objects)
}
N := min(len(objects), BatchSize)
batch := objects[:N]
// We have to decrypt only ETags of SSE-S3 single-part
@ -317,9 +315,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
// of the client provided context and add the bucket
// key, if not present.
kmsCtx := kms.Context{}
for k, v := range cryptoCtx {
kmsCtx[k] = v
}
maps.Copy(kmsCtx, cryptoCtx)
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
@ -389,9 +385,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
// of the client provided context and add the bucket
// key, if not present.
kmsCtx := kms.Context{}
for k, v := range cryptoCtx {
kmsCtx[k] = v
}
maps.Copy(kmsCtx, cryptoCtx)
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}

View File

@ -384,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) {
// Simple useful utilities
repeat = func(k int64, n int) []int64 {
a := []int64{}
for i := 0; i < n; i++ {
for range n {
a = append(a, k)
}
return a
@ -471,10 +471,7 @@ func TestGetDecryptedRange(t *testing.T) {
// round up the lbPartOffset
// to the end of the
// corresponding DARE package
lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
if lbPkgEndOffset > v {
lbPkgEndOffset = v
}
lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v)
bytesToDrop := v - lbPkgEndOffset
// Last segment to update `l`

View File

@ -22,7 +22,7 @@ import (
"fmt"
"net/url"
"runtime"
"sort"
"slices"
"strings"
"github.com/cespare/xxhash/v2"
@ -122,9 +122,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
// eyes that we prefer a sorted setCount slice for the
// subsequent function to figure out the right common
// divisor, it avoids loops.
sort.Slice(setCounts, func(i, j int) bool {
return setCounts[i] < setCounts[j]
})
slices.Sort(setCounts)
return setCounts
}

View File

@ -55,7 +55,6 @@ func TestCreateServerEndpoints(t *testing.T) {
}
for i, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
srvCtxt := serverCtxt{}
err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
@ -85,7 +84,6 @@ func TestGetDivisibleSize(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotGCD := getDivisibleSize(testCase.totalSizes)
if testCase.result != gotGCD {
@ -172,7 +170,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args {
@ -294,7 +291,6 @@ func TestGetSetIndexes(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
for i, arg := range testCase.args {
@ -637,7 +633,6 @@ func TestParseEndpointSet(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotEs, err := parseEndpointSet(0, testCase.arg)
if err != nil && testCase.success {

View File

@ -312,7 +312,6 @@ func TestCreateEndpoints(t *testing.T) {
}
for i, testCase := range testCases {
i := i
testCase := testCase
t.Run("", func(t *testing.T) {
var srvCtxt serverCtxt

View File

@ -136,10 +136,7 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + length) / e.blockSize
tillOffset := endShard*shardSize + shardSize
if tillOffset > shardFileSize {
tillOffset = shardFileSize
}
tillOffset := min(endShard*shardSize+shardSize, shardFileSize)
return tillOffset
}

View File

@ -30,7 +30,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
var mu sync.Mutex
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, i := range r.Perm(len(disks)) {
i := i
wg.Add(1)
go func() {
defer wg.Done()

View File

@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
buf := &bytes.Buffer{}
// Verify erasure.Decode() for random offsets and lengths.
for i := 0; i < iterations; i++ {
for range iterations {
offset := r.Int63n(length)
readLen := r.Int63n(length - offset)
@ -308,17 +308,16 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
b.Fatalf("failed to create erasure test file: %v", err)
}
for i := 0; i < dataDown; i++ {
for i := range dataDown {
writers[i] = nil
}
for i := data; i < data+parityDown; i++ {
writers[i] = nil
}
b.ResetTimer()
b.SetBytes(size)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
bitrotReaders := make([]io.ReaderAt, len(disks))
for index, disk := range disks {
if writers[index] == nil {

View File

@ -172,17 +172,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
content := make([]byte, size)
for i := 0; i < dataDown; i++ {
for i := range dataDown {
disks[i] = OfflineDisk
}
for i := data; i < data+parityDown; i++ {
disks[i] = OfflineDisk
}
b.ResetTimer()
b.SetBytes(size)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
writers := make([]io.Writer, len(disks))
for i, disk := range disks {
if disk == OfflineDisk {

View File

@ -102,7 +102,7 @@ func TestErasureHeal(t *testing.T) {
// setup stale disks for the test case
staleDisks := make([]StorageAPI, len(disks))
copy(staleDisks, disks)
for j := 0; j < len(staleDisks); j++ {
for j := range staleDisks {
if j < test.offDisks {
readers[j] = nil
} else {

View File

@ -175,7 +175,7 @@ func TestListOnlineDisks(t *testing.T) {
fourNanoSecs := time.Unix(4, 0).UTC()
modTimesThreeNone := make([]time.Time, 16)
modTimesThreeFour := make([]time.Time, 16)
for i := 0; i < 16; i++ {
for i := range 16 {
// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
// to be tampered with.
if i > 12 {
@ -244,7 +244,6 @@ func TestListOnlineDisks(t *testing.T) {
}
for i, test := range testCases {
test := test
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
if err != nil {
@ -350,7 +349,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
fourNanoSecs := time.Unix(4, 0).UTC()
modTimesThreeNone := make([]time.Time, 16)
modTimesThreeFour := make([]time.Time, 16)
for i := 0; i < 16; i++ {
for i := range 16 {
// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
// to be tampered with.
if i > 12 {
@ -419,7 +418,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
}
for i, test := range testCases {
test := test
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
_, err := obj.PutObject(ctx, bucket, object,
mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
@ -753,7 +751,7 @@ func TestCommonParities(t *testing.T) {
}
for idx, test := range tests {
var metaArr []FileInfo
for i := 0; i < 12; i++ {
for i := range 12 {
fi := test.fi1
if i%2 == 0 {
fi = test.fi2

View File

@ -116,7 +116,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
g := errgroup.WithNErrs(len(storageDisks))
for index := range storageDisks {
index := index
g.Go(func() error {
if storageDisks[index] == nil {
// we ignore disk not found errors

View File

@ -296,7 +296,6 @@ func TestIsObjectDangling(t *testing.T) {
// Add new cases as seen
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs)
if !gotMeta.Equals(testCase.expectedMeta) {

View File

@ -204,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
g := errgroup.WithNErrs(len(disks))
// Read `xl.meta` in parallel across disks.
for index := range disks {
index := index
g.Go(func() (err error) {
if disks[index] == nil {
return errDiskNotFound

View File

@ -55,7 +55,7 @@ func TestDiskCount(t *testing.T) {
// of errors into a single maximal error with in the list.
func TestReduceErrs(t *testing.T) {
canceledErrs := make([]error, 0, 5)
for i := 0; i < 5; i++ {
for i := range 5 {
canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled))
}
// List all of all test cases to validate various cases of reduce errors.
@ -222,7 +222,7 @@ func Test_hashOrder(t *testing.T) {
var tmp [16]byte
rng.Read(tmp[:])
prefix := hex.EncodeToString(tmp[:])
for i := 0; i < 10000; i++ {
for range 10000 {
rng.Read(tmp[:])
y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x)

View File

@ -408,7 +408,6 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound

View File

@ -189,7 +189,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
commonNumVersions := 2
numVersionsInQuorum := make([]int, 16)
numVersionsNoQuorum := make([]int, 16)
for i := 0; i < 16; i++ {
for i := range 16 {
if i < 4 {
continue
}
@ -269,7 +269,6 @@ func TestFindFileInfoInQuorum(t *testing.T) {
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum)
_, ok1 := err.(InsufficientReadQuorum)
@ -316,7 +315,7 @@ func TestTransitionInfoEquals(t *testing.T) {
}
var i uint
for i = 0; i < 8; i++ {
for i = range uint(8) {
fi := FileInfo{
TransitionTier: inputs[0].tier,
TransitionedObjName: inputs[0].remoteObjName,

View File

@ -322,7 +322,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
uploads = append(uploads, MultipartInfo{
Bucket: bucket,
Object: object,
UploadID: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))),
UploadID: base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadID)),
Initiated: startTime,
})
populatedUploadIDs.Add(uploadID)
@ -498,7 +498,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
partsMetadata[index].Metadata = userDefined
}
uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano())
uploadID := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadUUID)))
uploadID := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadUUID))
uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID)
// Write updated `xl.meta` to all disks.
@ -540,7 +540,6 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
@ -820,7 +819,6 @@ func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI
objectParts := make([][]string, len(onlineDisks))
// List uploaded parts from drives.
for index := range onlineDisks {
index := index
g.Go(func() (err error) {
if onlineDisks[index] == nil {
return errDiskNotFound
@ -995,7 +993,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
objectPartInfos := make([][]*ObjectPartInfo, len(disks))
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() (err error) {
if disks[index] == nil {
return errDiskNotFound

View File

@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"path"
"runtime"
@ -542,7 +543,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
disks := er.getDisks()
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
@ -575,7 +575,6 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
rawFileInfos := make([]RawFileInfo, len(disks))
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() (err error) {
if disks[index] == nil {
return errDiskNotFound
@ -1029,7 +1028,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
dataDirs := make([]string, len(disks))
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
@ -1631,7 +1629,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
@ -1836,7 +1833,6 @@ func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object
}
g := errgroup.WithNErrs(len(onlineDisks))
for index := range onlineDisks {
index := index
g.Go(func() error {
if onlineDisks[index] == nil {
return nil
@ -1862,7 +1858,6 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return nil
@ -2222,9 +2217,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
return ObjectInfo{}, err
}
}
for k, v := range objInfo.UserDefined {
fi.Metadata[k] = v
}
maps.Copy(fi.Metadata, objInfo.UserDefined)
fi.ModTime = opts.MTime
fi.VersionID = opts.VersionID
@ -2294,9 +2287,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
fi.Metadata[xhttp.AmzObjectTagging] = tags
fi.ReplicationState = opts.PutReplicationState()
for k, v := range opts.UserDefined {
fi.Metadata[k] = v
}
maps.Copy(fi.Metadata, opts.UserDefined)
if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
@ -2314,7 +2305,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o
// Start writing `xl.meta` to all disks in parallel.
for index := range onlineDisks {
index := index
g.Go(func() error {
if onlineDisks[index] == nil {
return errDiskNotFound

View File

@ -112,7 +112,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
}
for _, test := range testCases {
test := test
t.Run("", func(t *testing.T) {
_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{})
if err != nil {
@ -625,7 +624,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
t.Fatal(err)
}
for f := 0; f < 2; f++ {
for f := range 2 {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
diskErrors[i] = nil
@ -774,7 +773,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
// in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,4)
for f := 0; f < 2; f++ {
for f := range 2 {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
diskErrors[i] = nil
@ -837,7 +836,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
// in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,2)
for f := 0; f < 2; f++ {
for f := range 2 {
t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) {
diskErrors := make(map[int]error)
for i := 0; i <= f; i++ {
@ -1109,7 +1108,6 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
{parts7, errs7, 11, 11, parts7SC, nil},
}
for _, tt := range tests {
tt := tt
t.(*testing.T).Run("", func(t *testing.T) {
globalStorageClass.Update(tt.storageClassCfg)
actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks)))

View File

@ -25,6 +25,7 @@ import (
"io"
"math/rand"
"net/http"
"slices"
"sort"
"strings"
"time"
@ -117,12 +118,7 @@ func (pd *PoolDecommissionInfo) bucketPop(bucket string) bool {
}
func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool {
for _, b := range pd.DecommissionedBuckets {
if b == bucket {
return true
}
}
return false
return slices.Contains(pd.DecommissionedBuckets, bucket)
}
func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) {
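
The isBucketDecommissioned rewrite above swaps a manual linear search for slices.Contains; later in this diff slices.ContainsFunc plays the same role with a predicate (hasBadPathComponent). A standalone sketch with invented bucket names:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	decommissioned := []string{"photos", "logs"}
	// slices.Contains reports whether the value is present, replacing the
	// explicit for/if/return-true loop.
	fmt.Println(slices.Contains(decommissioned, "logs")) // true
	// slices.ContainsFunc is the predicate form used later for hasBadPathComponent.
	fmt.Println(slices.ContainsFunc(decommissioned, func(b string) bool {
		return strings.HasPrefix(b, "ph")
	})) // true
}
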
@ -792,8 +788,6 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}
for setIdx, set := range pool.sets {
set := set
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
@ -901,7 +895,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
}
// gr.Close() is ensured by decommissionObject().
for try := 0; try < 3; try++ {
for range 3 {
if version.IsRemote() {
if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{
VersionID: versionID,

View File

@ -176,7 +176,6 @@ func TestPoolMetaValidate(t *testing.T) {
t.Parallel()
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
update, err := testCase.meta.validate(testCase.pools)
if testCase.expectedErr {

View File

@ -580,8 +580,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
}
for setIdx, set := range pool.sets {
set := set
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
@ -594,7 +592,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal)
return true
}
return false
}
@ -689,7 +686,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
continue
}
for try := 0; try < 3; try++ {
for range 3 {
// GetObjectReader.Close is called by rebalanceObject
gr, err := set.GetObjectNInfo(ctx,
bucket,

View File

@ -420,7 +420,6 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b
nSets := make([]int, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
// Skip suspended pools or pools participating in rebalance for any new
// I/O.
if z.IsSuspended(index) || z.IsPoolRebalancing(index) {
@ -660,7 +659,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
g.Go(func() error {
return z.serverPools[index].Shutdown(ctx)
}, index)
@ -712,7 +710,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool)
storageInfos := make([]StorageInfo, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
g.Go(func() error {
storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics)
return nil
@ -1268,7 +1265,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools))
for i, pool := range z.serverPools {
i := i
pool := pool
eg.Go(func() error {
dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts)
@ -2244,7 +2240,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
for poolIdx, erasureSet := range z.serverPools {
for setIdx, set := range erasureSet.sets {
set := set
listOut := make(chan metaCacheEntry, 1)
entries = append(entries, listOut)
disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)

View File

@ -392,7 +392,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
var lk sync.Mutex
for i := range setCount {
lockerEpSet := set.NewStringSet()
for j := 0; j < setDriveCount; j++ {
for j := range setDriveCount {
wg.Add(1)
go func(i int, endpoint Endpoint) {
defer wg.Done()
@ -415,7 +415,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
defer wg.Done()
var innerWg sync.WaitGroup
for j := 0; j < setDriveCount; j++ {
for j := range setDriveCount {
disk := storageDisks[i*setDriveCount+j]
if disk == nil {
continue
@ -593,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo {
g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets {
index := index
g.Go(func() error {
storageInfos[index] = s.sets[index].StorageInfo(ctx)
return nil
@ -618,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag
g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets {
index := index
g.Go(func() error {
storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics)
return nil
@ -641,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets {
index := index
g.Go(func() error {
return s.sets[index].Shutdown(ctx)
}, index)
@ -705,7 +702,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
g := errgroup.WithNErrs(len(storageDisks))
for index := range storageDisks {
index := index
g.Go(func() error {
if storageDisks[index] == nil {
// we ignore disk not found errors

View File

@ -40,13 +40,12 @@ func BenchmarkCrcHash(b *testing.B) {
{1024},
}
for _, testCase := range cases {
testCase := testCase
key := randString(testCase.key)
b.Run("", func(b *testing.B) {
b.SetBytes(1024)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
crcHashMod(key, 16)
}
})
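
The benchmark bodies above move from the classic "for i := 0; i < b.N; i++" loop to testing.B.Loop, added in Go 1.24; only the loop body is timed, which is why the explicit b.ResetTimer calls disappear in these hunks. A minimal sketch:

package example_test

import (
	"strconv"
	"testing"
)

func BenchmarkItoa(b *testing.B) {
	// b.Loop keeps running the body until the framework has enough samples;
	// a counter can still be kept with "for i := 0; b.Loop(); i++", as one
	// later hunk in this diff does.
	for b.Loop() {
		_ = strconv.Itoa(42)
	}
}
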
@ -65,13 +64,12 @@ func BenchmarkSipHash(b *testing.B) {
{1024},
}
for _, testCase := range cases {
testCase := testCase
key := randString(testCase.key)
b.Run("", func(b *testing.B) {
b.SetBytes(1024)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
sipHashMod(key, 16, testUUID)
}
})
@ -164,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
nDisks := 16 // Maximum disks.
var erasureDisks []string
for i := 0; i < nDisks; i++ {
for range nDisks {
// Do not attempt to create this path, the test validates
// so that newErasureSets initializes non existing paths
// and successfully returns initialized object layer.

View File

@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"math/rand"
"os"
"runtime"
@ -175,7 +176,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
di := madmin.Disk{
Endpoint: endpoints[index].String(),
@ -219,9 +219,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
di.Metrics.LastMinute[k] = v.asTimedAction()
}
}
for k, v := range info.Metrics.APICalls {
di.Metrics.APICalls[k] = v
}
maps.Copy(di.Metrics.APICalls, info.Metrics.APICalls)
if info.Total > 0 {
di.Utilization = float64(info.Used / info.Total * 100)
}
@ -287,7 +285,6 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new
infos := make([]DiskInfo, len(disks))
r := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, i := range r.Perm(len(disks)) {
i := i
wg.Add(1)
go func() {
defer wg.Done()

View File

@ -99,7 +99,7 @@ func fmtGenMain(ctxt *cli.Context) {
format := newFormatErasureV3(setCount, setDriveCount)
format.ID = deploymentID
for i := range setCount { // for each erasure set
for j := 0; j < setDriveCount; j++ {
for j := range setDriveCount {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]
if deploymentID != "" {
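
The "for j := range setDriveCount" form above uses Go 1.22 ranging over an integer, equivalent to the classic three-clause loop; "for range n" is the same thing when the index is unused. A tiny sketch with an invented count:

package main

import "fmt"

func main() {
	setDriveCount := 4 // illustrative value only
	// Iterates j = 0, 1, ..., setDriveCount-1, exactly like
	// for j := 0; j < setDriveCount; j++.
	for j := range setDriveCount {
		fmt.Println("drive", j)
	}
}
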

View File

@ -159,7 +159,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
for i := range numSets {
format.Erasure.Sets[i] = make([]string, setLen)
for j := 0; j < setLen; j++ {
for j := range setLen {
format.Erasure.Sets[i][j] = mustGetUUID()
}
}
@ -324,7 +324,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
// Load format from each disk in parallel
for index := range storageDisks {
index := index
g.Go(func() error {
if storageDisks[index] == nil {
return errDiskNotFound
@ -530,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format
// Write `format.json` to all disks.
for index := range storageDisks {
index := index
g.Go(func() error {
if formats[index] == nil {
return errDiskNotFound
@ -566,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag
storageDisks := make([]StorageAPI, len(endpoints))
g := errgroup.WithNErrs(len(endpoints))
for index := range endpoints {
index := index
g.Go(func() (err error) {
storageDisks[index], err = newStorageAPI(endpoints[index], opts)
return err
@ -600,7 +597,6 @@ func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool {
func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error {
g := errgroup.WithNErrs(len(formats))
for i := range formats {
i := i
g.Go(func() error {
if formats[i] == nil || !endpoints[i].IsLocal {
return nil
@ -641,7 +637,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
for i := range setCount {
hostCount := make(map[string]int, setDriveCount)
for j := 0; j < setDriveCount; j++ {
for j := range setDriveCount {
disk := storageDisks[i*setDriveCount+j]
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]
@ -662,7 +658,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
return
}
logger.Info(" * Set %v:", i+1)
for j := 0; j < setDriveCount; j++ {
for j := range setDriveCount {
disk := storageDisks[i*setDriveCount+j]
logger.Info(" - Drive: %s", disk.String())
}

View File

@ -48,7 +48,7 @@ func TestFixFormatV3(t *testing.T) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 8)
for j := 0; j < 8; j++ {
for j := range 8 {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[0][j]
formats[j] = newFormat
@ -79,7 +79,7 @@ func TestFormatErasureEmpty(t *testing.T) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 16)
for j := 0; j < 16; j++ {
for j := range 16 {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[0][j]
formats[j] = newFormat
@ -276,8 +276,8 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 32)
for i := 0; i < setCount; i++ {
for j := 0; j < setDriveCount; j++ {
for i := range setCount {
for j := range setDriveCount {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat
@ -390,18 +390,17 @@ func BenchmarkGetFormatErasureInQuorumOld(b *testing.B) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 15*200)
for i := 0; i < setCount; i++ {
for j := 0; j < setDriveCount; j++ {
for i := range setCount {
for j := range setDriveCount {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat
}
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
_, _ = getFormatErasureInQuorumOld(formats)
}
}
@ -414,18 +413,17 @@ func BenchmarkGetFormatErasureInQuorum(b *testing.B) {
format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
formats := make([]*formatErasureV3, 15*200)
for i := 0; i < setCount; i++ {
for j := 0; j < setDriveCount; j++ {
for i := range setCount {
for j := range setDriveCount {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat
}
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
_, _ = getFormatErasureInQuorum(formats)
}
}
@ -440,8 +438,8 @@ func TestNewFormatSets(t *testing.T) {
formats := make([]*formatErasureV3, 32)
errs := make([]error, 32)
for i := 0; i < setCount; i++ {
for j := 0; j < setDriveCount; j++ {
for i := range setCount {
for j := range setDriveCount {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*setDriveCount+j] = newFormat

View File

@ -98,7 +98,7 @@ func (m *minioFileInfo) IsDir() bool {
return m.isDir
}
func (m *minioFileInfo) Sys() interface{} {
func (m *minioFileInfo) Sys() any {
return nil
}
@ -316,7 +316,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
if err != nil {
return nil, err
}
claims := make(map[string]interface{})
claims := make(map[string]any)
claims[expClaim] = UTCNow().Add(expiryDur).Unix()
claims[ldapUser] = lookupResult.NormDN

View File

@ -33,14 +33,14 @@ var globalRemoteFTPClientTransport = NewRemoteTargetHTTPTransport(true)()
type minioLogger struct{}
// Print implement Logger
func (log *minioLogger) Print(sessionID string, message interface{}) {
func (log *minioLogger) Print(sessionID string, message any) {
if serverDebugLog {
fmt.Printf("%s %s\n", sessionID, message)
}
}
// Printf implement Logger
func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) {
func (log *minioLogger) Printf(sessionID string, format string, v ...any) {
if serverDebugLog {
if sessionID != "" {
fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...))

View File

@ -23,6 +23,7 @@ import (
"net/http"
"path"
"runtime/debug"
"slices"
"strings"
"sync/atomic"
"time"
@ -396,8 +397,7 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler {
if k == "delimiter" { // delimiters are allowed to have `.` or `..`
continue
}
for _, v := range vv {
if hasBadPathComponent(v) {
if slices.ContainsFunc(vv, hasBadPathComponent) {
if ok {
tc.FuncName = "handler.ValidRequest"
tc.ResponseRecorder.LogErrBody = true
@ -409,7 +409,6 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler {
return
}
}
}
if hasMultipleAuth(r) {
if ok {
tc.FuncName = "handler.Auth"

View File

@ -90,7 +90,7 @@ var isHTTPHeaderSizeTooLargeTests = []struct {
func generateHeader(size, usersize int) http.Header {
header := http.Header{}
for i := 0; i < size; i++ {
for i := range size {
header.Set(strconv.Itoa(i), "")
}
userlength := 0
@ -136,7 +136,6 @@ var containsReservedMetadataTests = []struct {
func TestContainsReservedMetadata(t *testing.T) {
for _, test := range containsReservedMetadataTests {
test := test
t.Run("", func(t *testing.T) {
contains := containsReservedMetadata(test.header)
if contains && !test.shouldFail {
@ -201,7 +200,7 @@ func Benchmark_hasBadPathComponent(t *testing.B) {
t.Run(tt.name, func(b *testing.B) {
b.SetBytes(int64(len(tt.input)))
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
if got := hasBadPathComponent(tt.input); got != tt.want {
t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want)
}

View File

@ -292,7 +292,7 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
return contentEnc
}
var newEncs []string
for _, enc := range strings.Split(contentEnc, ",") {
for enc := range strings.SplitSeq(contentEnc, ",") {
if enc != streamingContentEncoding {
newEncs = append(newEncs, enc)
}
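
strings.SplitSeq (Go 1.24) returns an iterator rather than an allocated slice, which is what the strings.Split rewrites here and in several later files rely on; the loop body itself is unchanged. A standalone sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Ranging over SplitSeq yields each piece without building an
	// intermediate []string first.
	for enc := range strings.SplitSeq("gzip,aws-chunked,zstd", ",") {
		fmt.Println(enc)
	}
}
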

View File

@ -54,10 +54,7 @@ func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err er
case h.IsSuffixLength:
specifiedLen := -h.Start
rangeLength = specifiedLen
if specifiedLen > resourceSize {
rangeLength = resourceSize
}
rangeLength = min(specifiedLen, resourceSize)
case h.Start >= resourceSize:
return 0, InvalidRange{
@ -98,10 +95,7 @@ func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64
start = h.Start
if h.IsSuffixLength {
start = resourceSize + h.Start
if start < 0 {
start = 0
}
start = max(resourceSize+h.Start, 0)
}
return start, length, nil
}
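
The range-header clamping above now uses the min and max built-ins (Go 1.21) in place of if-based clamping. A small sketch with made-up sizes:

package main

import "fmt"

func main() {
	resourceSize, specifiedLen := int64(100), int64(250)
	// min/max replace the removed "if specifiedLen > resourceSize { ... }"
	// style adjustments; the result can never exceed resourceSize or drop
	// below zero.
	rangeLength := min(specifiedLen, resourceSize)
	start := max(resourceSize-specifiedLen, 0)
	fmt.Println(rangeLength, start) // 100 0
}
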

View File

@ -98,7 +98,7 @@ func (ies *IAMEtcdStore) getUsersSysType() UsersSysType {
return ies.usersSysType
}
func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error {
func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item any, itemPath string, opts ...options) error {
data, err := json.Marshal(item)
if err != nil {
return err
@ -114,7 +114,7 @@ func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, it
return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...)
}
func getIAMConfig(item interface{}, data []byte, itemPath string) error {
func getIAMConfig(item any, data []byte, itemPath string) error {
data, err := decryptData(data, itemPath)
if err != nil {
return err
@ -123,7 +123,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error {
return json.Unmarshal(data, item)
}
func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error {
func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item any, path string) error {
data, err := readKeyEtcd(ctx, ies.client, path)
if err != nil {
return err

View File

@ -22,6 +22,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"path"
"strings"
"sync"
@ -80,7 +81,7 @@ func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType {
return iamOS.usersSysType
}
func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error {
func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item any, objPath string, opts ...options) error {
json := jsoniter.ConfigCompatibleWithStandardLibrary
data, err := json.Marshal(item)
if err != nil {
@ -135,7 +136,7 @@ func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context,
return data, meta, nil
}
func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error {
func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item any, objPath string) error {
data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath)
if err != nil {
return err
@ -294,7 +295,6 @@ func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IA
g := errgroup.WithNErrs(len(users))
for index := range users {
index := index
g.Go(func() error {
userName := path.Dir(users[index])
user, err := iamOS.loadUserIdentity(ctx, userName, userType)
@ -413,7 +413,6 @@ func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, use
g := errgroup.WithNErrs(len(users))
for index := range users {
index := index
g.Go(func() error {
userName := strings.TrimSuffix(users[index], ".json")
userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup)
@ -538,7 +537,6 @@ func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, polici
g := errgroup.WithNErrs(len(policies))
for index := range policies {
index := index
g.Go(func() error {
policyName := path.Dir(policies[index])
policyDoc, err := iamOS.loadPolicy(ctx, policyName)
@ -776,9 +774,7 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
}
// Copy svcUsersMap to cache.iamUsersMap
for k, v := range svcUsersMap {
cache.iamUsersMap[k] = v
}
maps.Copy(cache.iamUsersMap, svcUsersMap)
cache.buildUserGroupMemberships()

View File

@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
"maps"
"path"
"sort"
"strings"
@ -159,7 +160,7 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string
type UserIdentity struct {
Version int `json:"version"`
Credentials auth.Credentials `json:"credentials"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
UpdatedAt time.Time `json:"updatedAt"`
}
func newUserIdentity(cred auth.Credentials) UserIdentity {
@ -171,7 +172,7 @@ type GroupInfo struct {
Version int `json:"version"`
Status string `json:"status"`
Members []string `json:"members"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
UpdatedAt time.Time `json:"updatedAt"`
}
func newGroupInfo(members []string) GroupInfo {
@ -182,7 +183,7 @@ func newGroupInfo(members []string) GroupInfo {
type MappedPolicy struct {
Version int `json:"version"`
Policies string `json:"policy"`
UpdatedAt time.Time `json:"updatedAt,omitempty"`
UpdatedAt time.Time `json:"updatedAt"`
}
// mappedPoliciesToMap copies the map of mapped policies to a regular map.
@ -198,7 +199,7 @@ func mappedPoliciesToMap(m *xsync.MapOf[string, MappedPolicy]) map[string]Mapped
// converts a mapped policy into a slice of distinct policies
func (mp MappedPolicy) toSlice() []string {
var policies []string
for _, policy := range strings.Split(mp.Policies, ",") {
for policy := range strings.SplitSeq(mp.Policies, ",") {
if strings.TrimSpace(policy) == "" {
continue
}
@ -219,8 +220,8 @@ func newMappedPolicy(policy string) MappedPolicy {
type PolicyDoc struct {
Version int `json:",omitempty"`
Policy policy.Policy
CreateDate time.Time `json:",omitempty"`
UpdateDate time.Time `json:",omitempty"`
CreateDate time.Time
UpdateDate time.Time
}
func newPolicyDoc(p policy.Policy) PolicyDoc {
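
The struct tags above lose the omitempty option on time.Time fields. For encoding/json this is a no-op: omitempty never omits struct-typed fields, so a zero timestamp is emitted either way. A quick standalone check:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type record struct {
	UpdatedAt time.Time `json:"updatedAt,omitempty"`
}

func main() {
	// omitempty only applies to false, 0, nil and empty string/slice/map
	// values, not to structs such as time.Time, so the tag option had no effect.
	out, _ := json.Marshal(record{})
	fmt.Println(string(out)) // {"updatedAt":"0001-01-01T00:00:00Z"}
}
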
@ -400,7 +401,6 @@ func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool,
g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time.
for index := range groups {
index := index
g.Go(func() error {
err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap)
if err != nil && !errors.Is(err, errNoSuchPolicy) {
@ -610,8 +610,8 @@ type IAMStorageAPI interface {
loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error
loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error
loadIAMConfig(ctx context.Context, item interface{}, path string) error
saveIAMConfig(ctx context.Context, item any, path string, opts ...options) error
loadIAMConfig(ctx context.Context, item any, path string) error
deleteIAMConfig(ctx context.Context, path string) error
savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error
@ -839,7 +839,7 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string,
return policies, nil
}
if store.policy != nil {
val, err, _ := store.policy.Do(name, func() (interface{}, error) {
val, err, _ := store.policy.Do(name, func() (any, error) {
return getPolicies()
})
if err != nil {
@ -1614,9 +1614,7 @@ func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Polic
}
cache := store.lock()
for policy, p := range m {
cache.iamPolicyDocsMap[policy] = p
}
maps.Copy(cache.iamPolicyDocsMap, m)
store.unlock()
for policy, p := range m {
@ -2909,7 +2907,7 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred
func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error {
groupLoad := env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn
newCachePopulate := func() (val interface{}, err error) {
newCachePopulate := func() (val any, err error) {
newCache := newIamCache()
// Check for service account first
@ -2975,7 +2973,7 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
}
var (
val interface{}
val any
err error
)
if store.group != nil {
@ -3007,30 +3005,20 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
return true
})
for k, v := range newCache.iamGroupsMap {
cache.iamGroupsMap[k] = v
}
maps.Copy(cache.iamGroupsMap, newCache.iamGroupsMap)
for k, v := range newCache.iamPolicyDocsMap {
cache.iamPolicyDocsMap[k] = v
}
maps.Copy(cache.iamPolicyDocsMap, newCache.iamPolicyDocsMap)
for k, v := range newCache.iamUserGroupMemberships {
cache.iamUserGroupMemberships[k] = v
}
maps.Copy(cache.iamUserGroupMemberships, newCache.iamUserGroupMemberships)
newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool {
cache.iamUserPolicyMap.Store(k, v)
return true
})
for k, v := range newCache.iamUsersMap {
cache.iamUsersMap[k] = v
}
maps.Copy(cache.iamUsersMap, newCache.iamUsersMap)
for k, v := range newCache.iamSTSAccountsMap {
cache.iamSTSAccountsMap[k] = v
}
maps.Copy(cache.iamSTSAccountsMap, newCache.iamSTSAccountsMap)
newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool {
cache.iamSTSPolicyMap.Store(k, v)

View File

@ -1056,7 +1056,7 @@ type newServiceAccountOpts struct {
expiration *time.Time
allowSiteReplicatorAccount bool // allow creating internal service account for site-replication.
claims map[string]interface{}
claims map[string]any
}
// NewServiceAccount - create a new service account
@ -1099,7 +1099,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount {
return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed
}
m := make(map[string]interface{})
m := make(map[string]any)
m[parentClaim] = parentUser
if len(policyBuf) > 0 {
@ -1345,7 +1345,7 @@ func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) (
}
// GetClaimsForSvcAcc - gets the claims associated with the service account.
func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) {
func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]any, error) {
if !sys.Initialized() {
return nil, errServerNotInitialized
}
@ -1696,10 +1696,8 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...))
}
for k, v := range updatedKeysMap {
// Replace the map values with the updated ones
accessKeyMap[k] = v
}
maps.Copy(accessKeyMap, updatedKeysMap)
return skippedAccessKeys, nil
}

View File

@ -19,6 +19,7 @@ package cmd
import (
"errors"
"maps"
"net/http"
"time"
@ -110,9 +111,7 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b
return nil, nil, false, errAuthentication
}
for k, v := range eclaims {
claims.MapClaims[k] = v
}
maps.Copy(claims.MapClaims, eclaims)
// if root access is disabled, disable all its service accounts and temporary credentials.
if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() {

View File

@ -175,7 +175,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
fn := authenticateNode
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
fn(creds.AccessKey, creds.SecretKey)
}
})
@ -183,7 +183,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
fn := newCachedAuthToken()
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
fn()
}
})

View File

@ -139,7 +139,7 @@ func pickRelevantGoroutines() (gs []string) {
// get runtime stack buffer.
buf := debug.Stack()
// runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" .
for _, g := range strings.Split(string(buf), "\n\n") {
for g := range strings.SplitSeq(string(buf), "\n\n") {
// Again split on a new line, the first line of the second half contains the info about the go routine.
sl := strings.SplitN(g, "\n", 2)
if len(sl) != 2 {

View File

@ -329,7 +329,7 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
lris, ok := l.lockMap[resource]
if !ok {
// Just to be safe, delete uuids.
for idx := 0; idx < maxDeleteList; idx++ {
for idx := range maxDeleteList {
mapID := formatUUID(uid, idx)
if _, ok := l.lockUID[mapID]; !ok {
break

View File

@ -279,12 +279,12 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) {
}
t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
l := newLocker()
for i := 0; i < locks; i++ {
for range locks {
var tmp [16]byte
rng.Read(tmp[:])
res := []string{hex.EncodeToString(tmp[:])}
for i := 0; i < readers; i++ {
for range readers {
rng.Read(tmp[:])
ok, err := l.RLock(t.Context(), dsync.LockArgs{
UID: uuid.NewString(),
@ -366,12 +366,12 @@ func Test_localLocker_RUnlock(t *testing.T) {
}
t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
l := newLocker()
for i := 0; i < locks; i++ {
for range locks {
var tmp [16]byte
rng.Read(tmp[:])
res := []string{hex.EncodeToString(tmp[:])}
for i := 0; i < readers; i++ {
for range readers {
rng.Read(tmp[:])
ok, err := l.RLock(t.Context(), dsync.LockArgs{
UID: uuid.NewString(),

View File

@ -8,211 +8,211 @@ import (
"github.com/minio/minio/internal/logger"
)
func proxyLogIf(ctx context.Context, err error, errKind ...interface{}) {
func proxyLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "proxy", err, errKind...)
}
func replLogIf(ctx context.Context, err error, errKind ...interface{}) {
func replLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "replication", err, errKind...)
}
func replLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func replLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "replication", err, id, errKind...)
}
func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "iam", err, id, errKind...)
}
func iamLogIf(ctx context.Context, err error, errKind ...interface{}) {
func iamLogIf(ctx context.Context, err error, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) {
logger.LogIf(ctx, "iam", err, errKind...)
}
}
func iamLogEvent(ctx context.Context, msg string, args ...interface{}) {
func iamLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "iam", msg, args...)
}
func rebalanceLogIf(ctx context.Context, err error, errKind ...interface{}) {
func rebalanceLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "rebalance", err, errKind...)
}
func rebalanceLogEvent(ctx context.Context, msg string, args ...interface{}) {
func rebalanceLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "rebalance", msg, args...)
}
func adminLogIf(ctx context.Context, err error, errKind ...interface{}) {
func adminLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "admin", err, errKind...)
}
func authNLogIf(ctx context.Context, err error, errKind ...interface{}) {
func authNLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "authN", err, errKind...)
}
func authZLogIf(ctx context.Context, err error, errKind ...interface{}) {
func authZLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "authZ", err, errKind...)
}
func peersLogIf(ctx context.Context, err error, errKind ...interface{}) {
func peersLogIf(ctx context.Context, err error, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) {
logger.LogIf(ctx, "peers", err, errKind...)
}
}
func peersLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
func peersLogAlwaysIf(ctx context.Context, err error, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) {
logger.LogAlwaysIf(ctx, "peers", err, errKind...)
}
}
func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
if !errors.Is(err, grid.ErrDisconnected) {
logger.LogOnceIf(ctx, "peers", err, id, errKind...)
}
}
func bugLogIf(ctx context.Context, err error, errKind ...interface{}) {
func bugLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "internal", err, errKind...)
}
func healingLogIf(ctx context.Context, err error, errKind ...interface{}) {
func healingLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "healing", err, errKind...)
}
func healingLogEvent(ctx context.Context, msg string, args ...interface{}) {
func healingLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "healing", msg, args...)
}
func healingLogOnceIf(ctx context.Context, err error, errKind ...interface{}) {
func healingLogOnceIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "healing", err, errKind...)
}
func batchLogIf(ctx context.Context, err error, errKind ...interface{}) {
func batchLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "batch", err, errKind...)
}
func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "batch", err, id, errKind...)
}
func bootLogIf(ctx context.Context, err error, errKind ...interface{}) {
func bootLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "bootstrap", err, errKind...)
}
func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...)
}
func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) {
func dnsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "dns", err, errKind...)
}
func internalLogIf(ctx context.Context, err error, errKind ...interface{}) {
func internalLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "internal", err, errKind...)
}
func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "internal", err, id, errKind...)
}
func transitionLogIf(ctx context.Context, err error, errKind ...interface{}) {
func transitionLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "transition", err, errKind...)
}
func configLogIf(ctx context.Context, err error, errKind ...interface{}) {
func configLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "config", err, errKind...)
}
func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "config", err, id, errKind...)
}
func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...)
}
func scannerLogIf(ctx context.Context, err error, errKind ...interface{}) {
func scannerLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "scanner", err, errKind...)
}
func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "scanner", err, id, errKind...)
}
func ilmLogIf(ctx context.Context, err error, errKind ...interface{}) {
func ilmLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "ilm", err, errKind...)
}
func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "ilm", err, id, errKind...)
}
func encLogIf(ctx context.Context, err error, errKind ...interface{}) {
func encLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "encryption", err, errKind...)
}
func encLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func encLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "encryption", err, id, errKind...)
}
func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
func storageLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "storage", err, errKind...)
}
func storageLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
func storageLogAlwaysIf(ctx context.Context, err error, errKind ...any) {
logger.LogAlwaysIf(ctx, "storage", err, errKind...)
}
func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "storage", err, id, errKind...)
}
func decomLogIf(ctx context.Context, err error, errKind ...interface{}) {
func decomLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "decom", err, errKind...)
}
func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "decom", err, id, errKind...)
}
func decomLogEvent(ctx context.Context, msg string, args ...interface{}) {
func decomLogEvent(ctx context.Context, msg string, args ...any) {
logger.Event(ctx, "decom", msg, args...)
}
func etcdLogIf(ctx context.Context, err error, errKind ...interface{}) {
func etcdLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "etcd", err, errKind...)
}
func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "etcd", err, id, errKind...)
}
func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) {
func metricsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "metrics", err, errKind...)
}
func s3LogIf(ctx context.Context, err error, errKind ...interface{}) {
func s3LogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "s3", err, errKind...)
}
func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "sftp", err, id, errKind...)
}
func shutdownLogIf(ctx context.Context, err error, errKind ...interface{}) {
func shutdownLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "shutdown", err, errKind...)
}
func stsLogIf(ctx context.Context, err error, errKind ...interface{}) {
func stsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "sts", err, errKind...)
}
func tierLogIf(ctx context.Context, err error, errKind ...interface{}) {
func tierLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "tier", err, errKind...)
}
func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
func kmsLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "kms", err, errKind...)
}
@ -220,11 +220,11 @@ func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
type KMSLogger struct{}
// LogOnceIf is the implementation of LogOnceIf, accessible using the Logger interface
func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "kms", err, id, errKind...)
}
// LogIf is the implementation of LogIf, accessible using the Logger interface
func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...interface{}) {
func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "kms", err, errKind...)
}
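
The file above rewrites every variadic interface{} parameter to any. The two spellings are interchangeable: any has been a type alias for interface{} since Go 1.18, so no call site or behaviour changes. A sketch mirroring the shape of the logging helpers:

package main

import "fmt"

// logIf mirrors the helpers above; errKind ...any is identical to the
// pre-rewrite errKind ...interface{}.
func logIf(err error, errKind ...any) {
	if err != nil {
		fmt.Println(append([]any{err}, errKind...)...)
	}
}

func main() {
	logIf(fmt.Errorf("drive offline"), "kind:", "storage")
}
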

View File

@ -20,6 +20,7 @@ package cmd
import (
"context"
"errors"
"maps"
"runtime/debug"
"sort"
"sync"
@ -70,7 +71,7 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache {
}
}
func (b *bucketMetacache) debugf(format string, data ...interface{}) {
func (b *bucketMetacache) debugf(format string, data ...any) {
if serverDebugLog {
console.Debugf(format+"\n", data...)
}
@ -195,9 +196,7 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri
b.mu.RLock()
defer b.mu.RUnlock()
dst := make(map[string]metacache, len(b.caches))
for k, v := range b.caches {
dst[k] = v
}
maps.Copy(dst, b.caches)
// Copy indexes
dst2 := make(map[string][]string, len(b.cachesRoot))
for k, v := range b.cachesRoot {

View File

@ -33,7 +33,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
for i := range pathNames[:] {
pathNames[i] = fmt.Sprintf("prefix/%d", i)
}
for i := 0; i < elements; i++ {
for i := range elements {
bm.findCache(listPathOptions{
ID: mustGetUUID(),
Bucket: "",
@ -49,8 +49,8 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
})
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for i := 0; b.Loop(); i++ {
bm.findCache(listPathOptions{
ID: mustGetUUID(),
Bucket: "",

View File

@ -633,7 +633,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
for testID, tt := range tests {
rng := rand.New(rand.NewSource(0))
// Run for a number of times, shuffling the input to ensure that output is consistent.
for i := 0; i < 10; i++ {
for i := range 10 {
t.Run(fmt.Sprintf("test-%d-%s-run-%d", testID, tt.name, i), func(t *testing.T) {
if i > 0 {
rng.Shuffle(len(tt.m), func(i, j int) {

View File

@ -38,8 +38,8 @@ func (o *listPathOptions) parseMarker() {
o.Marker = s[:start]
end := strings.LastIndex(s, "]")
tag := strings.Trim(s[start:end], "[]")
tags := strings.Split(tag, ",")
for _, tag := range tags {
tags := strings.SplitSeq(tag, ",")
for tag := range tags {
kv := strings.Split(tag, ":")
if len(kv) < 2 {
continue

View File

@ -25,6 +25,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"strconv"
"strings"
@ -162,13 +163,13 @@ func (o listPathOptions) newMetacache() metacache {
}
}
func (o *listPathOptions) debugf(format string, data ...interface{}) {
func (o *listPathOptions) debugf(format string, data ...any) {
if serverDebugLog {
console.Debugf(format+"\n", data...)
}
}
func (o *listPathOptions) debugln(data ...interface{}) {
func (o *listPathOptions) debugln(data ...any) {
if serverDebugLog {
console.Debugln(data...)
}
@ -906,9 +907,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
fi := FileInfo{
Metadata: make(map[string]string, len(meta)),
}
for k, v := range meta {
fi.Metadata[k] = v
}
maps.Copy(fi.Metadata, meta)
err := er.updateObjectMetaWithOpts(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks(), UpdateMetadataOpts{NoPersistence: true})
if err == nil {
break

View File

@ -20,6 +20,7 @@ package cmd
import (
"context"
"fmt"
"maps"
"math"
"net/http"
"runtime"
@ -431,15 +432,9 @@ func (m *MetricV2) clone() MetricV2 {
VariableLabels: make(map[string]string, len(m.VariableLabels)),
Histogram: make(map[string]uint64, len(m.Histogram)),
}
for k, v := range m.StaticLabels {
metric.StaticLabels[k] = v
}
for k, v := range m.VariableLabels {
metric.VariableLabels[k] = v
}
for k, v := range m.Histogram {
metric.Histogram[k] = v
}
maps.Copy(metric.StaticLabels, m.StaticLabels)
maps.Copy(metric.VariableLabels, m.VariableLabels)
maps.Copy(metric.Histogram, m.Histogram)
return metric
}
@ -2492,10 +2487,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
"endpoint": ep,
},
}
dwntime := currDowntime
if health.offlineDuration > currDowntime {
dwntime = health.offlineDuration
}
dwntime := max(health.offlineDuration, currDowntime)
downtimeDuration.Value = float64(dwntime / time.Second)
ml = append(ml, downtimeDuration)
}

View File

@ -35,7 +35,7 @@ import (
type promLogger struct{}
func (p promLogger) Println(v ...interface{}) {
func (p promLogger) Println(v ...any) {
metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v))
}

View File

@ -45,7 +45,7 @@ func TestNSLockRace(t *testing.T) {
ctx := t.Context()
for i := 0; i < 10000; i++ {
for i := range 10000 {
nsLk := newNSLock(false)
// lk1; ref=1

View File

@ -201,7 +201,6 @@ func TestCheckLocalServerAddr(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
err := CheckLocalServerAddr(testCase.serverAddr)
switch {
@ -273,7 +272,6 @@ func TestSameLocalAddrs(t *testing.T) {
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2)
if testCase.expectedErr != nil && err == nil {

View File

@ -155,7 +155,6 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -170,7 +169,6 @@ func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string)
func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -185,7 +183,6 @@ func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) [
func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -200,7 +197,6 @@ func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup s
func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -215,7 +211,6 @@ func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []
func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp bool) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -230,7 +225,6 @@ func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp
func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -245,7 +239,6 @@ func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []Notif
func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -260,7 +253,6 @@ func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey
func (sys *NotificationSys) LoadServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr {
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
for idx, client := range sys.peerClients {
client := client
ng.Go(ctx, func() error {
if client == nil {
return errPeerNotReachable
@ -276,7 +268,6 @@ func (sys *NotificationSys) BackgroundHealStatus(ctx context.Context) ([]madmin.
ng := WithNPeers(len(sys.peerClients))
states := make([]madmin.BgHealState, len(sys.peerClients))
for idx, client := range sys.peerClients {
idx := idx
client := client
ng.Go(ctx, func() error {
if client == nil {
@ -485,7 +476,6 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
locksResp := make([]*PeerLocks, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
index := index
client := client
g.Go(func() error {
if client == nil {
@ -570,7 +560,6 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
replicationStats := make([]BucketStatsMap, len(sys.peerClients))
for index, client := range sys.peerClients {
index := index
client := client
ng.Go(ctx, func() error {
if client == nil {
@ -612,7 +601,6 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
bucketStats := make([]BucketStats, len(sys.peerClients))
for index, client := range sys.peerClients {
index := index
client := client
ng.Go(ctx, func() error {
if client == nil {
@ -647,7 +635,6 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri
ng := WithNPeers(len(sys.peerClients)).WithRetries(1)
siteStats := make([]SRMetricsSummary, len(sys.peerClients))
for index, client := range sys.peerClients {
index := index
client := client
ng.Go(ctx, func() error {
if client == nil {
@ -926,7 +913,6 @@ func (sys *NotificationSys) GetResourceMetrics(ctx context.Context) <-chan Metri
g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
for index := range sys.peerClients {
index := index
g.Go(func() error {
if sys.peerClients[index] == nil {
return errPeerNotReachable
@ -1302,7 +1288,6 @@ func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan MetricV
g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
for index := range sys.peerClients {
index := index
g.Go(func() error {
if sys.peerClients[index] == nil {
return errPeerNotReachable
@ -1323,7 +1308,6 @@ func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan Metric
g := errgroup.WithNErrs(len(sys.peerClients))
peerChannels := make([]<-chan MetricV2, len(sys.peerClients))
for index := range sys.peerClients {
index := index
g.Go(func() error {
if sys.peerClients[index] == nil {
return errPeerNotReachable

View File

@ -19,6 +19,7 @@ package cmd
import (
"io"
"maps"
"math"
"net/http"
"time"
@ -290,9 +291,7 @@ func (o *ObjectInfo) Clone() (cinfo ObjectInfo) {
VersionPurgeStatusInternal: o.VersionPurgeStatusInternal,
}
cinfo.UserDefined = make(map[string]string, len(o.UserDefined))
for k, v := range o.UserDefined {
cinfo.UserDefined[k] = v
}
maps.Copy(cinfo.UserDefined, o.UserDefined)
return cinfo
}

View File

@ -156,7 +156,6 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te
}
for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
var err error
var resultL ListObjectsInfo
@ -944,7 +943,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v
}
for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys)
result, err := obj.ListObjects(t.Context(), testCase.bucketName,
@ -1676,7 +1674,6 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
}
for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
result, err := obj.ListObjectVersions(t.Context(), testCase.bucketName,
testCase.prefix, testCase.marker, "", testCase.delimiter, int(testCase.maxKeys))
@ -1827,7 +1824,6 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr
}
for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
var foundObjects []ObjectInfo
var foundPrefixes []string
@ -1914,7 +1910,7 @@ func BenchmarkListObjects(b *testing.B) {
}
// Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ {
for i := range 20000 {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(b.Context(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{})
if err != nil {
@ -1922,10 +1918,8 @@ func BenchmarkListObjects(b *testing.B) {
}
}
b.ResetTimer()
// List the buckets over and over and over.
for i := 0; i < b.N; i++ {
for b.Loop() {
_, err = obj.ListObjects(b.Context(), bucket, "", "obj9000", "", -1)
if err != nil {
b.Fatal(err)

View File

@ -369,7 +369,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
for i := 0; i < 3; i++ {
for range 3 {
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
// Used to test the listing for the case of multiple uploadID's for a given object.
res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts)
@ -392,7 +392,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
}
// Initiate Multipart Upload on bucketNames[2].
// Used to test the listing for the case of multiple objects for a given bucket.
for i := 0; i < 6; i++ {
for i := range 6 {
res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
@ -2167,7 +2167,6 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
}
for _, testCase := range testCases {
testCase := testCase
t.(*testing.T).Run("", func(t *testing.T) {
opts = ObjectOptions{}
actualResult, actualErr := obj.CompleteMultipartUpload(t.Context(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})

View File

@ -226,7 +226,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) {
attributes = make(map[string]struct{})
for _, headerVal := range h.Values(xhttp.AmzObjectAttributes) {
for _, v := range strings.Split(strings.TrimSpace(headerVal), ",") {
for v := range strings.SplitSeq(strings.TrimSpace(headerVal), ",") {
if v != "" {
attributes[v] = struct{}{}
}

View File

@ -61,14 +61,14 @@ func benchmark(b *testing.B, data []string) {
b.Run("concat naive", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
concatNaive(data...)
}
})
b.Run("concat fast", func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
concat(data...)
}
})
@ -77,7 +77,7 @@ func benchmark(b *testing.B, data []string) {
func BenchmarkConcatImplementation(b *testing.B) {
data := make([]string, 2)
rng := rand.New(rand.NewSource(0))
for i := 0; i < 2; i++ {
for i := range 2 {
var tmp [16]byte
rng.Read(tmp[:])
data[i] = hex.EncodeToString(tmp[:])
@ -91,7 +91,7 @@ func BenchmarkPathJoinOld(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
pathJoinOld("volume", "path/path/path")
}
})
@ -102,7 +102,7 @@ func BenchmarkPathJoin(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for b.Loop() {
pathJoin("volume", "path/path/path")
}
})

Some files were not shown because too many files have changed in this diff.