diff --git a/cmd/admin-handlers-site-replication.go b/cmd/admin-handlers-site-replication.go
index a44fb01cf..e49e86277 100644
--- a/cmd/admin-handlers-site-replication.go
+++ b/cmd/admin-handlers-site-replication.go
@@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
 	}
 }
 
-func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
+func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
 	data, err := io.ReadAll(body)
 	if err != nil {
 		return SRError{
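Throughout this diff, `interface{}` becomes `any`. Since Go 1.18, `any` is a predeclared alias (`type any = interface{}`), so these hunks change spelling only; every signature stays type-identical. A minimal sketch (function names here are illustrative) showing the two forms interoperate:

```go
package main

import "fmt"

// any is a predeclared alias for interface{} (type any = interface{}),
// so the two parameter types below are identical.
func oldStyle(v interface{}) string { return fmt.Sprintf("%v", v) }
func newStyle(v any) string         { return oldStyle(v) }

func main() {
	fmt.Println(newStyle(42), newStyle("hello")) // 42 hello
}
```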
diff --git a/cmd/admin-handlers-users-race_test.go b/cmd/admin-handlers-users-race_test.go
index b7308e476..79474a01f 100644
--- a/cmd/admin-handlers-users-race_test.go
+++ b/cmd/admin-handlers-users-race_test.go
@@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 
 	// Create a policy policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
  "Statement": [
   {
@@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
    ]
   }
  ]
-}`, bucket))
+}`, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 	userCount := 50
 	accessKeys := make([]string, userCount)
 	secretKeys := make([]string, userCount)
-	for i := 0; i < userCount; i++ {
+	for i := range userCount {
 		accessKey, secretKey := mustGenerateCredentials(c)
 		err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
 		if err != nil {
@@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 	}
 
 	g := errgroup.Group{}
-	for i := 0; i < userCount; i++ {
+	for i := range userCount {
 		g.Go(func(i int) func() error {
 			return func() error {
 				uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go
index 629c7b446..8530046b9 100644
--- a/cmd/admin-handlers-users.go
+++ b/cmd/admin-handlers-users.go
@@ -24,6 +24,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"os"
 	"slices"
@@ -157,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-	for k, v := range ldapUsers {
-		allCredentials[k] = v
-	}
+	maps.Copy(allCredentials, ldapUsers)
 
 	// Marshal the response
 	data, err := json.Marshal(allCredentials)
@@ -2949,7 +2948,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
 		name:        createReq.Name,
 		description: description,
 		expiration:  createReq.Expiration,
-		claims:      make(map[string]interface{}),
+		claims:      make(map[string]any),
 	}
 
 	condValues := getConditionValues(r, "", cred)
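The `policyBytes := []byte(fmt.Sprintf(...))` → `fmt.Appendf(nil, ...)` conversions above and below drop the intermediate string: `fmt.Appendf` (Go 1.19) formats straight into a byte slice. A minimal sketch (the JSON fragment and bucket value are illustrative):

```go
package main

import "fmt"

func main() {
	bucket := "testbucket" // illustrative value

	// Before: format to a string, then copy it into a fresh []byte.
	before := []byte(fmt.Sprintf(`{"Resource": "arn:aws:s3:::%s/*"}`, bucket))

	// After: format directly into a byte slice; passing nil makes
	// Appendf allocate exactly one slice for the result.
	after := fmt.Appendf(nil, `{"Resource": "arn:aws:s3:::%s/*"}`, bucket)

	fmt.Println(string(before) == string(after)) // true
}
```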
diff --git a/cmd/admin-handlers-users_test.go b/cmd/admin-handlers-users_test.go
index dcedb014a..2c7ca0536 100644
--- a/cmd/admin-handlers-users_test.go
+++ b/cmd/admin-handlers-users_test.go
@@ -332,7 +332,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
 
 	// 2.2 create and associate policy to user
 	policy := "mypolicy-test-user-update"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
  "Statement": [
   {
@@ -355,7 +355,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -562,7 +562,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 
 	// 1. Create a policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
  "Statement": [
   {
@@ -585,7 +585,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -680,7 +680,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
 		c.Fatalf("bucket creat error: %v", err)
 	}
 
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
  "Statement": [
   {
@@ -703,7 +703,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 
 	// Check that default policies can be overwritten.
 	err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -739,7 +739,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 	}
 
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -762,7 +762,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -911,7 +911,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
 
 	// Create policy, user and associate policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -934,7 +934,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -995,7 +995,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
 
 	// Create policy, user and associate policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -1026,7 +1026,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -1093,7 +1093,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
 
 	// Create policy, user and associate policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -1116,7 +1116,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
    ]
   }
  ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -1367,7 +1367,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
 	svcAK, svcSK := mustGenerateCredentials(c)
 
 	// This policy does not allow listing objects.
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -1381,7 +1381,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
    ]
   }
  ]
-}`, bucket))
+}`, bucket)
 	cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
 		Policy:     policyBytes,
 		TargetUser: accessKey,
@@ -1558,7 +1558,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
 func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
 	c.Helper()
 	versions := []string{}
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
 		if err != nil {
 			c.Fatalf("upload did not succeed got %#v", err)
@@ -1627,7 +1627,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 	svcAK, svcSK := mustGenerateCredentials(c)
 
 	// This policy does not allow listing objects.
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -1641,7 +1641,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    ]
   }
  ]
-}`, bucket))
+}`, bucket)
 	cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
 		Policy:     policyBytes,
 		TargetUser: accessKey,
@@ -1655,7 +1655,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 	c.mustNotListObjects(ctx, svcClient, bucket)
 
 	// This policy allows listing objects.
-	newPolicyBytes := []byte(fmt.Sprintf(`{
+	newPolicyBytes := fmt.Appendf(nil, `{
  "Version": "2012-10-17",
 "Statement": [
   {
@@ -1668,7 +1668,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
    ]
   }
  ]
-}`, bucket))
+}`, bucket)
 	err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
 		NewPolicy: newPolicyBytes,
 	})
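The loop rewrites such as `for i := range userCount` and `for range 5` use Go 1.22's range-over-int: `range n` iterates `0..n-1`, and the index can be dropped when unused. A minimal sketch (values are illustrative):

```go
package main

import "fmt"

func main() {
	userCount := 3

	// Go 1.22+: equivalent to `for i := 0; i < userCount; i++`.
	for i := range userCount {
		fmt.Println("user", i)
	}

	// When the index is unused, drop it entirely.
	for range 2 {
		fmt.Println("tick")
	}
}
```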
diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go
index 355a4e126..42d498e79 100644
--- a/cmd/admin-handlers.go
+++ b/cmd/admin-handlers.go
@@ -954,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
 	var args dsync.LockArgs
 	var lockers []dsync.NetLocker
-	for _, path := range strings.Split(vars["paths"], ",") {
+	for path := range strings.SplitSeq(vars["paths"], ",") {
 		if path == "" {
 			continue
 		}
@@ -1193,7 +1193,7 @@ type dummyFileInfo struct {
 	mode    os.FileMode
 	modTime time.Time
 	isDir   bool
-	sys     interface{}
+	sys     any
 }
 
 func (f dummyFileInfo) Name() string       { return f.name }
@@ -1201,7 +1201,7 @@ func (f dummyFileInfo) Size() int64        { return f.size }
 func (f dummyFileInfo) Mode() os.FileMode  { return f.mode }
 func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
 func (f dummyFileInfo) IsDir() bool        { return f.isDir }
-func (f dummyFileInfo) Sys() interface{}   { return f.sys }
+func (f dummyFileInfo) Sys() any           { return f.sys }
 
 // DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
 // ----------
diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go
index ed4f7bc8b..3f8b3482b 100644
--- a/cmd/admin-handlers_test.go
+++ b/cmd/admin-handlers_test.go
@@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
 func TestTopLockEntries(t *testing.T) {
 	locksHeld := make(map[string][]lockRequesterInfo)
 	var owners []string
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		owners = append(owners, fmt.Sprintf("node-%d", i))
 	}
 
@@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
 	// request UID, but 10 different resource names associated with it.
 	var lris []lockRequesterInfo
 	uuid := mustGetUUID()
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		resource := fmt.Sprintf("bucket/delete-object-%d", i)
 		lri := lockRequesterInfo{
 			Name:   resource,
@@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
 	}
 
 	// Add a few concurrent read locks to the mix
-	for i := 0; i < 50; i++ {
+	for i := range 50 {
 		resource := fmt.Sprintf("bucket/get-object-%d", i)
 		lri := lockRequesterInfo{
 			Name:   resource,
diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go
index 065f30d86..0b2976349 100644
--- a/cmd/admin-heal-ops.go
+++ b/cmd/admin-heal-ops.go
@@ -22,6 +22,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"net/http"
 	"sort"
 	"sync"
@@ -520,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
 
 	// Make a copy before returning the value
 	retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
-	for k, v := range h.scannedItemsMap {
-		retMap[k] = v
-	}
+	maps.Copy(retMap, h.scannedItemsMap)
 
 	return retMap
 }
@@ -534,9 +533,7 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
 
 	// Make a copy before returning the value
 	retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
-	for k, v := range h.healedItemsMap {
-		retMap[k] = v
-	}
+	maps.Copy(retMap, h.healedItemsMap)
 
 	return retMap
 }
@@ -549,9 +546,7 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
 
 	// Make a copy before returning the value
 	retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
-	for k, v := range h.healFailedItemsMap {
-		retMap[k] = v
-	}
+	maps.Copy(retMap, h.healFailedItemsMap)
 
 	return retMap
 }
diff --git a/cmd/api-headers.go b/cmd/api-headers.go
index 2dd36ea86..c2ca23fbf 100644
--- a/cmd/api-headers.go
+++ b/cmd/api-headers.go
@@ -65,7 +65,7 @@ func setCommonHeaders(w http.ResponseWriter) {
 }
 
 // Encodes the response headers into XML format.
-func encodeResponse(response interface{}) []byte {
+func encodeResponse(response any) []byte {
 	var buf bytes.Buffer
 	buf.WriteString(xml.Header)
 	if err := xml.NewEncoder(&buf).Encode(response); err != nil {
@@ -83,7 +83,7 @@ func encodeResponse(response interface{}) []byte {
 // Do not use this function for anything other than ListObjects()
 // variants, please open a github discussion if you wish to use
 // this in other places.
-func encodeResponseList(response interface{}) []byte {
+func encodeResponseList(response any) []byte {
 	var buf bytes.Buffer
 	buf.WriteString(xxml.Header)
 	if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
@@ -94,7 +94,7 @@ func encodeResponseList(response interface{}) []byte {
 }
 
 // Encodes the response headers into JSON format.
-func encodeResponseJSON(response interface{}) []byte {
+func encodeResponseJSON(response any) []byte {
 	var bytesBuffer bytes.Buffer
 	e := json.NewEncoder(&bytesBuffer)
 	e.Encode(response)
diff --git a/cmd/api-response_test.go b/cmd/api-response_test.go
index 6736e5259..8f89b3408 100644
--- a/cmd/api-response_test.go
+++ b/cmd/api-response_test.go
@@ -100,7 +100,6 @@ func TestObjectLocation(t *testing.T) {
 		},
 	}
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
 			if testCase.expectedLocation != gotLocation {
diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go
index 7b831d76b..6e824e312 100644
--- a/cmd/auth-handler.go
+++ b/cmd/auth-handler.go
@@ -216,7 +216,7 @@ func getSessionToken(r *http.Request) (token string) {
 
 // Fetch claims in the security token returned by the client, doesn't return
 // errors - upon errors the returned claims map will be empty.
-func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
+func mustGetClaimsFromToken(r *http.Request) map[string]any {
 	claims, _ := getClaimsFromToken(getSessionToken(r))
 	return claims
 }
@@ -266,7 +266,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error)
 }
 
 // Fetch claims in the security token returned by the client.
-func getClaimsFromToken(token string) (map[string]interface{}, error) {
+func getClaimsFromToken(token string) (map[string]any, error) {
 	jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
 	if err != nil {
 		return nil, err
@@ -275,7 +275,7 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
 }
 
 // Fetch claims in the security token returned by the client and validate the token.
-func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
+func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
 	token := getSessionToken(r)
 	if token != "" && cred.AccessKey == "" {
 		// x-amz-security-token is not allowed for anonymous access.
diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go
index 1f1c535f7..330bace41 100644
--- a/cmd/background-newdisks-heal-ops.go
+++ b/cmd/background-newdisks-heal-ops.go
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -269,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
 func (h *healingTracker) isHealed(bucket string) bool {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
-	for _, v := range h.HealedBuckets {
-		if v == bucket {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(h.HealedBuckets, bucket)
 }
 
 // resume will reset progress to the numbers at the start of the bucket.
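The `isHealed` hunk above collapses a hand-rolled membership loop into `slices.Contains` (Go 1.21), which has identical semantics. A minimal sketch (slice contents are illustrative):

```go
package main

import (
	"fmt"
	"slices"
)

// containsLoop is the pre-1.21 shape of healingTracker.isHealed.
func containsLoop(healed []string, bucket string) bool {
	for _, v := range healed {
		if v == bucket {
			return true
		}
	}
	return false
}

func main() {
	healed := []string{"images", "logs"}
	// slices.Contains (Go 1.21) has exactly the same semantics.
	fmt.Println(slices.Contains(healed, "logs"), containsLoop(healed, "logs")) // true true
}
```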
diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go
index 1a1d3d598..484882e96 100644
--- a/cmd/batch-handlers.go
+++ b/cmd/batch-handlers.go
@@ -25,6 +25,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net/http"
 	"net/url"
@@ -574,9 +575,7 @@ func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
 		oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
 	}
 
-	for k, v := range objInfo.UserMetadata {
-		oi.UserDefined[k] = v
-	}
+	maps.Copy(oi.UserDefined, objInfo.UserMetadata)
 
 	return oi
 }
diff --git a/cmd/batch-job-common-types.go b/cmd/batch-job-common-types.go
index 83e1c554b..d02a71dd5 100644
--- a/cmd/batch-job-common-types.go
+++ b/cmd/batch-job-common-types.go
@@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
 type BatchJobSize int64
 
 // UnmarshalYAML to parse humanized byte values
-func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
 	var batchExpireSz string
 	err := unmarshal(&batchExpireSz)
 	if err != nil {
diff --git a/cmd/batch-rotate.go b/cmd/batch-rotate.go
index 24414e270..3e8f18faf 100644
--- a/cmd/batch-rotate.go
+++ b/cmd/batch-rotate.go
@@ -21,6 +21,7 @@ import (
 	"context"
 	"encoding/base64"
 	"fmt"
+	"maps"
 	"math/rand"
 	"net/http"
 	"runtime"
@@ -110,9 +111,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
 		}
 	}
 	e.kmsContext = kms.Context{}
-	for k, v := range ctx {
-		e.kmsContext[k] = v
-	}
+	maps.Copy(e.kmsContext, ctx)
 	ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
 	if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
 		return err
@@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
 	// Since we are rotating the keys, make sure to update the metadata.
 	oi.metadataOnly = true
 	oi.keyRotation = true
-	for k, v := range encMetadata {
-		oi.UserDefined[k] = v
-	}
+	maps.Copy(oi.UserDefined, encMetadata)
 	if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
 		VersionID: oi.VersionID,
 	}, ObjectOptions{
diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go
index 17b04fe58..0f7025a9b 100644
--- a/cmd/benchmark-utils_test.go
+++ b/cmd/benchmark-utils_test.go
@@ -51,8 +51,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 	b.ReportAllocs()
 	// the actual benchmark for PutObject starts here. Reset the benchmark timer.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		// insert the object.
 		objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
 			mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
@@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 	b.ReportAllocs()
 	// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		// insert the object.
 		totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
-		for j := 0; j < totalPartsNR; j++ {
+		for j := range totalPartsNR {
 			if j < totalPartsNR-1 {
 				textPartData = textData[j*partSize : (j+1)*partSize-1]
 			} else {
diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go
index ce2846b7a..0713be646 100644
--- a/cmd/bucket-handlers.go
+++ b/cmd/bucket-handlers.go
@@ -154,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
 
 	g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
 	for index := range bucketsToBeUpdatedSlice {
-		index := index
 		g.Go(func() error {
 			return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
 		}, index)
@@ -1387,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			// Set the correct hex md5sum for the fan-out stream.
 			fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))
 		}
-		concurrentSize := 100
-		if runtime.GOMAXPROCS(0) < concurrentSize {
-			concurrentSize = runtime.GOMAXPROCS(0)
-		}
+		concurrentSize := min(runtime.GOMAXPROCS(0), 100)
 
 		fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
 		eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
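The `concurrentSize` hunk above swaps a clamp-with-`if` for the Go 1.21 built-in `min`; the later `duration := max(...)` and `N := min(...)` hunks in this diff follow the same pattern. A minimal sketch:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Before: clamp with an if statement.
	concurrentSize := 100
	if runtime.GOMAXPROCS(0) < concurrentSize {
		concurrentSize = runtime.GOMAXPROCS(0)
	}

	// After: the Go 1.21 built-in returns the smaller operand.
	fmt.Println(concurrentSize == min(runtime.GOMAXPROCS(0), 100)) // true
}
```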
diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go
index 49e390011..0adb9b8f1 100644
--- a/cmd/bucket-handlers_test.go
+++ b/cmd/bucket-handlers_test.go
@@ -657,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 	sha256sum := ""
 	var objectNames []string
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		contentBytes := []byte("hello")
 		objectName := "test-object-" + strconv.Itoa(i)
 		if i == 0 {
@@ -687,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 	// The following block will create a bucket policy with delete object to 'public/*'. This is
 	// to test a mixed response of a successful & failure while deleting objects in a single request
-	policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
+	policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)),
 		bytes.NewReader(policyBytes), credentials.AccessKey, credentials.SecretKey, nil)
diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go
index f57c2226e..24fdc67d1 100644
--- a/cmd/bucket-lifecycle.go
+++ b/cmd/bucket-lifecycle.go
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"strconv"
 	"strings"
@@ -959,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
 			UserDefined: meta,
 		}
 	}
-	for k, v := range objInfo.UserDefined {
-		meta[k] = v
-	}
+	maps.Copy(meta, objInfo.UserDefined)
 	if len(objInfo.UserTags) != 0 {
 		meta[xhttp.AmzObjectTagging] = objInfo.UserTags
 	}
diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go
index e4465ea1a..20be4ffd3 100644
--- a/cmd/bucket-metadata-sys.go
+++ b/cmd/bucket-metadata-sys.go
@@ -472,7 +472,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
 		return meta, reloaded, nil
 	}
 
-	val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
+	val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
 		meta, err = loadBucketMetadata(ctx, objAPI, bucket)
 		if err != nil {
 			if !sys.Initialized() {
@@ -511,7 +511,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
 	g := errgroup.WithNErrs(len(buckets))
 	bucketMetas := make([]BucketMetadata, len(buckets))
 	for index := range buckets {
-		index := index
 		g.Go(func() error {
 			// Sleep and stagger to avoid blocked CPU and thundering
 			// herd upon start up sequence.
diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go
index 402d36efe..e506aceb0 100644
--- a/cmd/bucket-policy-handlers_test.go
+++ b/cmd/bucket-policy-handlers_test.go
@@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
 	var wg sync.WaitGroup
 	var mu sync.Mutex
 	wg.Add(n)
-	for i := 0; i < n; i++ {
+	for range n {
 		go func() {
 			defer wg.Done()
 			// Sync start.
@@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// Test case - 1.
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// Expecting StatusBadRequest (400).
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: maxBucketPolicySize + 1,
 			accessKey: credentials.AccessKey,
@@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// Expecting the HTTP response status to be StatusLengthRequired (411).
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: 0,
 			accessKey: credentials.AccessKey,
@@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// checkBucketPolicyResources should fail.
 		{
 			bucketName:         bucketName1,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// should result in 404 StatusNotFound
 		{
 			bucketName:         "non-existent-bucket",
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// should result in 404 StatusNotFound
 		{
 			bucketName:         ".invalid-bucket",
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// should result in 400 StatusBadRequest.
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
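The recurring map-copy rewrites in this diff (`allCredentials`, `retMap`, `meta`, `kmsCtx`, and below `cloneURLValues`) all use `maps.Copy` (Go 1.21), which copies every key/value pair from the source into the destination, overwriting existing keys exactly like the loops it replaces. A minimal sketch (map contents are illustrative):

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	meta := map[string]string{"content-type": "text/plain"}
	userDefined := map[string]string{"x-amz-meta-owner": "alice"}

	// Replaces: for k, v := range userDefined { meta[k] = v }
	// Existing keys in meta are overwritten, exactly like the loop.
	maps.Copy(meta, userDefined)

	fmt.Println(len(meta)) // 2
}
```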
diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go
index 4a2bd9249..0bcbe2e66 100644
--- a/cmd/bucket-policy.go
+++ b/cmd/bucket-policy.go
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"encoding/json"
+	"maps"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -187,9 +188,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
 	}
 
 	cloneURLValues := make(url.Values, len(r.Form))
-	for k, v := range r.Form {
-		cloneURLValues[k] = v
-	}
+	maps.Copy(cloneURLValues, r.Form)
 
 	for _, objLock := range []string{
 		xhttp.AmzObjectLockMode,
@@ -224,7 +223,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
 	// Add groups claim which could be a list. This will ensure that the claim
 	// `jwt:groups` works.
 	if grpsVal, ok := claims["groups"]; ok {
-		if grpsIs, ok := grpsVal.([]interface{}); ok {
+		if grpsIs, ok := grpsVal.([]any); ok {
 			grps := []string{}
 			for _, gI := range grpsIs {
 				if g, ok := gI.(string); ok {
diff --git a/cmd/bucket-replication-utils.go b/cmd/bucket-replication-utils.go
index 28bb7def1..7f17d833e 100644
--- a/cmd/bucket-replication-utils.go
+++ b/cmd/bucket-replication-utils.go
@@ -21,6 +21,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"maps"
 	"net/http"
 	"net/url"
 	"regexp"
@@ -311,7 +312,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
 	if len(s) == 0 {
 		return
 	}
-	for _, p := range strings.Split(s, ",") {
+	for p := range strings.SplitSeq(s, ",") {
 		if p == "" {
 			continue
 		}
@@ -735,9 +736,7 @@ type BucketReplicationResyncStatus struct {
 
 func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
 	m = make(map[string]TargetReplicationResyncStatus)
-	for arn, st := range rs.TargetsMap {
-		m[arn] = st
-	}
+	maps.Copy(m, rs.TargetsMap)
 	return
 }
diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go
index 71d753312..11d722d20 100644
--- a/cmd/bucket-replication.go
+++ b/cmd/bucket-replication.go
@@ -24,6 +24,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net/http"
 	"net/url"
@@ -803,9 +804,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
 	} else {
 		cs, mp := getCRCMeta(objInfo, 0, nil)
 		// Set object checksum.
-		for k, v := range cs {
-			meta[k] = v
-		}
+		maps.Copy(meta, cs)
 		isMP = mp
 		if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
 			// For objects where checksum is full object, it will be the same.
@@ -969,9 +968,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
 	t, _ := tags.ParseObjectTags(oi1.UserTags)
 
 	oi2Map := make(map[string]string)
-	for k, v := range oi2.UserTags {
-		oi2Map[k] = v
-	}
+	maps.Copy(oi2Map, oi2.UserTags)
 	if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
 		return replicateMetadata
 	}
@@ -1770,9 +1767,7 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
 		}
 		if !copied {
 			dst = make(map[string]string, len(metadata))
-			for k, v := range metadata {
-				dst[k] = v
-			}
+			maps.Copy(dst, metadata)
 			copied = true
 		}
 		delete(dst, key)
@@ -2954,7 +2949,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 	}()
 
 	var wg sync.WaitGroup
-	for i := 0; i < resyncParallelRoutines; i++ {
+	for i := range resyncParallelRoutines {
 		wg.Add(1)
 		workers[i] = make(chan ReplicateObjectInfo, 100)
 		i := i
@@ -3063,7 +3058,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 			workers[h%uint64(resyncParallelRoutines)] <- roi
 		}
 	}
-	for i := 0; i < resyncParallelRoutines; i++ {
+	for i := range resyncParallelRoutines {
 		xioutil.SafeClose(workers[i])
 	}
 	wg.Wait()
@@ -3193,11 +3188,9 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []stri
 			<-ctx.Done()
 			return
 		}
-		duration := time.Duration(r.Float64() * float64(time.Minute))
-		if duration < time.Second {
+		duration := max(time.Duration(r.Float64()*float64(time.Minute)),
 			// Make sure to sleep at least a second to avoid high CPU ticks.
-			duration = time.Second
-		}
+			time.Second)
 		time.Sleep(duration)
 	}
}
diff --git a/cmd/bucket-stats.go b/cmd/bucket-stats.go
index 20b4ebebf..9a2ec0d22 100644
--- a/cmd/bucket-stats.go
+++ b/cmd/bucket-stats.go
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"fmt"
+	"maps"
 	"math"
 	"sync/atomic"
 	"time"
@@ -221,9 +222,7 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
 		}
 		if s.Failed.ErrCounts == nil {
 			s.Failed.ErrCounts = make(map[string]int)
-			for k, v := range st.Failed.ErrCounts {
-				s.Failed.ErrCounts[k] = v
-			}
+			maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts)
 		}
 		c.Stats[arn] = &s
 	}
diff --git a/cmd/bucket-targets.go b/cmd/bucket-targets.go
index 0dda96c68..db93765c5 100644
--- a/cmd/bucket-targets.go
+++ b/cmd/bucket-targets.go
@@ -20,6 +20,7 @@ package cmd
 import (
 	"context"
 	"errors"
+	"maps"
 	"net/url"
 	"sync"
 	"time"
@@ -236,9 +237,7 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth {
 	sys.hMutex.RLock()
 	defer sys.hMutex.RUnlock()
 	m := make(map[string]epHealth, len(sys.hc))
-	for k, v := range sys.hc {
-		m[k] = v
-	}
+	maps.Copy(m, sys.hc)
 	return m
 }
diff --git a/cmd/callhome.go b/cmd/callhome.go
index 1a172a3ce..2a6d6695b 100644
--- a/cmd/callhome.go
+++ b/cmd/callhome.go
@@ -57,11 +57,9 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
 			// callhome running on a different node.
 			// sleep for some time and try again.
-			duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
-			if duration < time.Second {
+			duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())),
 				// Make sure to sleep at least a second to avoid high CPU ticks.
-				duration = time.Second
-			}
+				time.Second)
 			time.Sleep(duration)
 		}
 	}()
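The `strings.Split` → `strings.SplitSeq` hunks (in `parseReplicateDecision` above and `serverHandleEnvVars` below) use the Go 1.24 iterator variant, which yields each substring lazily instead of allocating the whole `[]string`; that suits loops that consume each piece once. A minimal sketch (input string is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	paths := "bucket/a,,bucket/b" // illustrative input

	// SplitSeq yields each piece lazily instead of allocating the
	// whole []string up front; empty entries still appear, so the
	// `continue` guard behaves exactly as with strings.Split.
	for path := range strings.SplitSeq(paths, ",") {
		if path == "" {
			continue
		}
		fmt.Println(path)
	}
}
```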
diff --git a/cmd/common-main.go b/cmd/common-main.go
index 55c5d2459..e57e190f9 100644
--- a/cmd/common-main.go
+++ b/cmd/common-main.go
@@ -105,7 +105,7 @@ func init() {
 	gob.Register(madmin.TimeInfo{})
 	gob.Register(madmin.XFSErrorConfigs{})
 	gob.Register(map[string]string{})
-	gob.Register(map[string]interface{}{})
+	gob.Register(map[string]any{})
 
 	// All minio-go and madmin-go API operations shall be performed only once,
 	// another way to look at this is we are turning off retries.
@@ -258,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) {
 
 	if !serverDebugLog {
 		// Disable console logging if server debug log is not enabled
-		noLog := func(string, ...interface{}) {}
+		noLog := func(string, ...any) {}
 
 		consoleapi.LogInfo = noLog
 		consoleapi.LogError = noLog
@@ -761,7 +761,7 @@ func serverHandleEnvVars() {
 	domains := env.Get(config.EnvDomain, "")
 	if len(domains) != 0 {
-		for _, domainName := range strings.Split(domains, config.ValueSeparator) {
+		for domainName := range strings.SplitSeq(domains, config.ValueSeparator) {
 			if _, ok := dns2.IsDomainName(domainName); !ok {
 				logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
 					"Invalid MINIO_DOMAIN value in environment variable")
@@ -1059,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
 	return time.Time{}, false
 }
 
-func (a bgCtx) Value(key interface{}) interface{} {
+func (a bgCtx) Value(key any) any {
 	return a.parent.Value(key)
 }
diff --git a/cmd/common-main_test.go b/cmd/common-main_test.go
index bcd804299..9757267d2 100644
--- a/cmd/common-main_test.go
+++ b/cmd/common-main_test.go
@@ -43,7 +43,6 @@ func Test_readFromSecret(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 			if err != nil {
@@ -155,7 +154,6 @@ MINIO_ROOT_PASSWORD=minio123`,
 		},
 	}
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 			if err != nil {
diff --git a/cmd/config-current.go b/cmd/config-current.go
index 76f114a8d..a87e2876a 100644
--- a/cmd/config-current.go
+++ b/cmd/config-current.go
@@ -21,6 +21,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"strings"
 	"sync"
 
@@ -78,12 +79,8 @@ func initHelp() {
 		config.BatchSubSys:        batch.DefaultKVS,
 		config.BrowserSubSys:      browser.DefaultKVS,
 	}
-	for k, v := range notify.DefaultNotificationKVS {
-		kvs[k] = v
-	}
-	for k, v := range lambda.DefaultLambdaKVS {
-		kvs[k] = v
-	}
+	maps.Copy(kvs, notify.DefaultNotificationKVS)
+	maps.Copy(kvs, lambda.DefaultLambdaKVS)
 	if globalIsErasure {
 		kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
 		kvs[config.HealSubSys] = heal.DefaultKVS
diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go
index 30d2e085e..0cceb1859 100644
--- a/cmd/config-migrate.go
+++ b/cmd/config-migrate.go
@@ -38,12 +38,12 @@ import (
 )
 
 // Save config file to corresponding backend
-func Save(configFile string, data interface{}) error {
+func Save(configFile string, data any) error {
 	return quick.SaveConfig(data, configFile, globalEtcdClient)
 }
 
 // Load config from backend
-func Load(configFile string, data interface{}) (quick.Config, error) {
+func Load(configFile string, data any) (quick.Config, error) {
 	return quick.LoadConfig(configFile, globalEtcdClient, data)
 }
diff --git a/cmd/config.go b/cmd/config.go
index 3a6dd6957..7311d628f 100644
--- a/cmd/config.go
+++ b/cmd/config.go
@@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
 	return saveConfig(ctx, objAPI, historyFile, kv)
 }
 
-func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
+func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error {
 	data, err := json.Marshal(cfg)
 	if err != nil {
 		return err
diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go
index 676a30ca3..c8f71b6fa 100644
--- a/cmd/consolelogger.go
+++ b/cmd/consolelogger.go
@@ -101,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
 		lastN = make([]log.Info, last)
 		sys.RLock()
-		sys.logBuf.Do(func(p interface{}) {
+		sys.logBuf.Do(func(p any) {
 			if p != nil {
 				lg, ok := p.(log.Info)
 				if ok && lg.SendLog(node, logKind) {
@@ -155,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats {
 // Content returns the console stdout log
 func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
 	sys.RLock()
-	sys.logBuf.Do(func(p interface{}) {
+	sys.logBuf.Do(func(p any) {
 		if p != nil {
 			lg, ok := p.(log.Info)
 			if ok {
@@ -181,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType {
 
 // Send log message 'e' to console and publish to console
 // log pubsub system
-func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error {
+func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error {
 	var lg log.Info
 	switch e := entry.(type) {
 	case log.Entry:
diff --git a/cmd/data-scanner-metric.go b/cmd/data-scanner-metric.go
index 8e0990ee4..1a8f201fb 100644
--- a/cmd/data-scanner-metric.go
+++ b/cmd/data-scanner-metric.go
@@ -198,7 +198,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p
 func (p *scannerMetrics) getCurrentPaths() []string {
 	var res []string
 	prefix := globalLocalNodeName + "/"
-	p.currentPaths.Range(func(key, value interface{}) bool {
+	p.currentPaths.Range(func(key, value any) bool {
 		// We are a bit paranoid, but better miss an entry than crash.
 		name, ok := key.(string)
 		if !ok {
@@ -221,7 +221,7 @@ func (p *scannerMetrics) getCurrentPaths() []string {
 // (since this is concurrent it may not be 100% reliable)
 func (p *scannerMetrics) activeDrives() int {
 	var i int
-	p.currentPaths.Range(func(k, v interface{}) bool {
+	p.currentPaths.Range(func(k, v any) bool {
 		i++
 		return true
 	})
@@ -299,7 +299,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
 	m.CollectedAt = time.Now()
 	m.ActivePaths = p.getCurrentPaths()
 	m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
-	for i := scannerMetric(0); i < scannerMetricLast; i++ {
+	for i := range scannerMetricLast {
 		if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
 			m.LifeTimeOps[i.String()] = n
 		}
@@ -309,7 +309,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
 	}
 
 	m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
-	for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
+	for i := range scannerMetricLastRealtime {
 		lm := p.lastMinute(i)
 		if lm.N > 0 {
 			m.LastMinute.Actions[i.String()] = lm.asTimedAction()
diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go
index a12345511..288f4d716 100644
--- a/cmd/data-scanner.go
+++ b/cmd/data-scanner.go
@@ -78,11 +78,9 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
 		// Run the data scanner in a loop
 		for {
 			runDataScanner(ctx, objAPI)
-			duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
-			if duration < time.Second {
+			duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())),
 				// Make sure to sleep at least a second to avoid high CPU ticks.
-				duration = time.Second
-			}
+				time.Second)
 			time.Sleep(duration)
 		}
 	}()
diff --git a/cmd/data-scanner_test.go b/cmd/data-scanner_test.go
index 7de47422f..c45ad1e79 100644
--- a/cmd/data-scanner_test.go
+++ b/cmd/data-scanner_test.go
@@ -127,7 +127,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
 		v2 uuid-2 modTime -3m
 		v1 uuid-1 modTime -4m
 	*/
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		fivs[i] = FileInfo{
 			Volume: bucket,
 			Name:   obj,
diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go
index 9e030a115..b41574113 100644
--- a/cmd/data-usage-cache.go
+++ b/cmd/data-usage-cache.go
@@ -22,6 +22,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net/http"
 	"path"
@@ -99,9 +100,7 @@ func (ats *allTierStats) clone() *allTierStats {
 	}
 	dst := *ats
 	dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
-	for tier, st := range ats.Tiers {
-		dst.Tiers[tier] = st
-	}
+	maps.Copy(dst.Tiers, ats.Tiers)
 	return &dst
 }
 
@@ -347,9 +346,7 @@ func (e dataUsageEntry) clone() dataUsageEntry {
 	// We operate on a copy from the receiver.
 	if e.Children != nil {
 		ch := make(dataUsageHashMap, len(e.Children))
-		for k, v := range e.Children {
-			ch[k] = v
-		}
+		maps.Copy(ch, e.Children)
 		e.Children = ch
 	}
diff --git a/cmd/data-usage_test.go b/cmd/data-usage_test.go
index 56ed0b9c3..7cbd98e6a 100644
--- a/cmd/data-usage_test.go
+++ b/cmd/data-usage_test.go
@@ -179,7 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Changed dir must be picked up in this many cycles.
-	for i := 0; i < dataUsageUpdateDirCycles; i++ {
+	for range dataUsageUpdateDirCycles {
 		got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
 		got.Info.NextCycle++
 		if err != nil {
@@ -428,7 +428,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Changed dir must be picked up in this many cycles.
-	for i := 0; i < dataUsageUpdateDirCycles; i++ {
+	for range dataUsageUpdateDirCycles {
 		got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
 		got.Info.NextCycle++
 		if err != nil {
@@ -526,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
 // generateUsageTestFiles create nFolders * nFiles files of size bytes each.
 func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
 	pl := make([]byte, size)
-	for i := 0; i < nFolders; i++ {
+	for i := range nFolders {
 		name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
 		err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
 		if err != nil {
 			t.Fatal(err)
 		}
-		for j := 0; j < nFiles; j++ {
+		for j := range nFiles {
 			name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
 			err = os.WriteFile(name, pl, os.ModePerm)
 			if err != nil {
@@ -618,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 }
 
 // equalAsJSON returns whether the values are equal when encoded as JSON.
-func equalAsJSON(a, b interface{}) bool {
+func equalAsJSON(a, b any) bool {
 	aj, err := json.Marshal(a)
 	if err != nil {
 		panic(err)
diff --git a/cmd/dynamic-timeouts.go b/cmd/dynamic-timeouts.go
index bc9b1c42f..9c4f297bb 100644
--- a/cmd/dynamic-timeouts.go
+++ b/cmd/dynamic-timeouts.go
@@ -129,12 +129,9 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
 	if failPct > dynamicTimeoutIncreaseThresholdPct {
 		// We are hitting the timeout too often, so increase the timeout by 25%
-		timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
-
-		// Set upper cap.
-		if timeout > int64(maxDynamicTimeout) {
-			timeout = int64(maxDynamicTimeout)
-		}
+		timeout := min(
+			// Set upper cap.
+			atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout))
 		// Safety, shouldn't happen
 		if timeout < dt.minimum {
 			timeout = dt.minimum
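The deleted `testCase := testCase`, `index := index`, and `i := i` lines in these test and errgroup hunks relied on pre-Go 1.22 semantics, where a `for` loop reused one variable across iterations and closures had to copy it. Since Go 1.22 each iteration gets a fresh variable, so the copies are dead code. A minimal sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		// Before Go 1.22 this needed `i := i`, or every goroutine
		// could observe the loop's final value; since 1.22 each
		// iteration gets a fresh i, so the copy is dead code.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(i) // prints 0, 1, 2 in some order
		}()
	}
	wg.Wait()
}
```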
diff --git a/cmd/dynamic-timeouts_test.go b/cmd/dynamic-timeouts_test.go
index c8f4c42f8..b353b983b 100644
--- a/cmd/dynamic-timeouts_test.go
+++ b/cmd/dynamic-timeouts_test.go
@@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogFailure()
 	}
 
@@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogFailure()
 	}
 
 	adjusted := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogFailure()
 	}
 
@@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogSuccess(20 * time.Second)
 	}
 
@@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogSuccess(20 * time.Second)
 	}
 
 	adjusted := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogSuccess(20 * time.Second)
 	}
 
@@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
 	initial := timeout.Timeout()
 
 	const successTimeout = 20 * time.Second
-	for l := 0; l < 100; l++ {
-		for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range 100 {
+		for range dynamicTimeoutLogSize {
 			timeout.LogSuccess(successTimeout)
 		}
 	}
@@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
 		rng := rand.New(rand.NewSource(int64(i)))
 		go func() {
 			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				for j := 0; j < 100; j++ {
+			for range 100 {
+				for range 100 {
 					timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
 				}
 				to := timeout.Timeout()
@@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
 	initial := timeout.Timeout()
 
 	const successTimeout = 20 * time.Second
-	for l := 0; l < 100; l++ {
-		for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range 100 {
+		for range dynamicTimeoutLogSize {
 			timeout.LogSuccess(successTimeout)
 		}
 	}
@@ -166,13 +166,9 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
 func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
 	const successTimeout = 20 * time.Second
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		rnd := f()
-		duration := time.Duration(float64(successTimeout) * rnd)
-
-		if duration < 100*time.Millisecond {
-			duration = 100 * time.Millisecond
-		}
+		duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond)
 		if duration >= time.Minute {
 			timeout.LogFailure()
 		} else {
@@ -188,7 +184,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for try := 0; try < 10; try++ {
+	for range 10 {
 		testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
 	}
 
@@ -205,7 +201,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for try := 0; try < 10; try++ {
+	for range 10 {
 		testDynamicTimeoutAdjust(t, timeout, func() float64 {
 			return 1.0 + rand.NormFloat64()
 		})
diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go
index 56b46c1f2..a42117271 100644
--- a/cmd/encryption-v1.go
+++ b/cmd/encryption-v1.go
@@ -29,6 +29,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"path"
 	"strconv"
@@ -117,10 +118,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
 		names = make([]string, 0, BatchSize)
 	)
 	for len(objects) > 0 {
-		N := BatchSize
-		if len(objects) < BatchSize {
-			N = len(objects)
-		}
+		N := min(len(objects), BatchSize)
 		batch := objects[:N]
 
 		// We have to decrypt only ETags of SSE-S3 single-part
@@ -317,9 +315,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
 		// of the client provided context and add the bucket
 		// key, if not present.
 		kmsCtx := kms.Context{}
-		for k, v := range cryptoCtx {
-			kmsCtx[k] = v
-		}
+		maps.Copy(kmsCtx, cryptoCtx)
 		if _, ok := kmsCtx[bucket]; !ok {
 			kmsCtx[bucket] = path.Join(bucket, object)
 		}
@@ -389,9 +385,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
 		// of the client provided context and add the bucket
 		// key, if not present.
 		kmsCtx := kms.Context{}
-		for k, v := range cryptoCtx {
-			kmsCtx[k] = v
-		}
+		maps.Copy(kmsCtx, cryptoCtx)
 		if _, ok := kmsCtx[bucket]; !ok {
 			kmsCtx[bucket] = path.Join(bucket, object)
 		}
diff --git a/cmd/encryption-v1_test.go b/cmd/encryption-v1_test.go
index ec441b4f0..a09a391f1 100644
--- a/cmd/encryption-v1_test.go
+++ b/cmd/encryption-v1_test.go
@@ -384,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) {
 	// Simple useful utilities
 	repeat = func(k int64, n int) []int64 {
 		a := []int64{}
-		for i := 0; i < n; i++ {
+		for range n {
 			a = append(a, k)
 		}
 		return a
@@ -471,10 +471,7 @@ func TestGetDecryptedRange(t *testing.T) {
 			// round up the lbPartOffset
 			// to the end of the
 			// corresponding DARE package
-			lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
-			if lbPkgEndOffset > v {
-				lbPkgEndOffset = v
-			}
+			lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v)
 			bytesToDrop := v - lbPkgEndOffset
 
 			// Last segment to update `l`
diff --git a/cmd/endpoint-ellipses.go b/cmd/endpoint-ellipses.go
index b74d6a886..2c76f41cb 100644
--- a/cmd/endpoint-ellipses.go
+++ b/cmd/endpoint-ellipses.go
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"net/url"
 	"runtime"
-	"sort"
+	"slices"
 	"strings"
 
 	"github.com/cespare/xxhash/v2"
@@ -122,9 +122,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
 	// eyes that we prefer a sorted setCount slice for the
 	// subsequent function to figure out the right common
 	// divisor, it avoids loops.
-	sort.Slice(setCounts, func(i, j int) bool {
-		return setCounts[i] < setCounts[j]
-	})
+	slices.Sort(setCounts)
 
 	return setCounts
 }
diff --git a/cmd/endpoint-ellipses_test.go b/cmd/endpoint-ellipses_test.go
index ee5b27ee4..6caaebbb6 100644
--- a/cmd/endpoint-ellipses_test.go
+++ b/cmd/endpoint-ellipses_test.go
@@ -55,7 +55,6 @@ func TestCreateServerEndpoints(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			srvCtxt := serverCtxt{}
 			err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
@@ -85,7 +84,6 @@ func TestGetDivisibleSize(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			gotGCD := getDivisibleSize(testCase.totalSizes)
 			if testCase.result != gotGCD {
@@ -172,7 +170,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
 			for i, arg := range testCase.args {
@@ -294,7 +291,6 @@ func TestGetSetIndexes(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
 			for i, arg := range testCase.args {
@@ -637,7 +633,6 @@ func TestParseEndpointSet(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			gotEs, err := parseEndpointSet(0, testCase.arg)
 			if err != nil && testCase.success {
diff --git a/cmd/endpoint_test.go b/cmd/endpoint_test.go
index 5fd31ed8b..1eb034ed9 100644
--- a/cmd/endpoint_test.go
+++ b/cmd/endpoint_test.go
@@ -312,7 +312,6 @@ func TestCreateEndpoints(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		i := i
 		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			var srvCtxt serverCtxt
diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go
index c825f3181..8560b585d 100644
--- a/cmd/erasure-coding.go
+++ b/cmd/erasure-coding.go
@@ -136,10 +136,7 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
 	shardSize := e.ShardSize()
 	shardFileSize := e.ShardFileSize(totalLength)
 	endShard := (startOffset + length) / e.blockSize
-	tillOffset := endShard*shardSize + shardSize
-	if tillOffset > shardFileSize {
-		tillOffset = shardFileSize
-	}
+	tillOffset := min(endShard*shardSize+shardSize, shardFileSize)
 	return tillOffset
 }
diff --git a/cmd/erasure-common.go b/cmd/erasure-common.go
index 350a1aba7..7146766ac 100644
--- a/cmd/erasure-common.go
+++ b/cmd/erasure-common.go
@@ -30,7 +30,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
 	var mu sync.Mutex
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	for _, i := range r.Perm(len(disks)) {
-		i := i
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
diff --git a/cmd/erasure-decode_test.go b/cmd/erasure-decode_test.go
index 4851a8e6c..229047e16 100644
--- a/cmd/erasure-decode_test.go
+++ b/cmd/erasure-decode_test.go
@@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
 	buf := &bytes.Buffer{}
 
 	// Verify erasure.Decode() for random offsets and lengths.
-	for i := 0; i < iterations; i++ {
+	for range iterations {
 		offset := r.Int63n(length)
 		readLen := r.Int63n(length - offset)
 
@@ -308,17 +308,16 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
 		b.Fatalf("failed to create erasure test file: %v", err)
 	}
 
-	for i := 0; i < dataDown; i++ {
+	for i := range dataDown {
 		writers[i] = nil
 	}
 	for i := data; i < data+parityDown; i++ {
 		writers[i] = nil
 	}
 
-	b.ResetTimer()
 	b.SetBytes(size)
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		bitrotReaders := make([]io.ReaderAt, len(disks))
 		for index, disk := range disks {
 			if writers[index] == nil {
diff --git a/cmd/erasure-encode_test.go b/cmd/erasure-encode_test.go
index b55105a21..54a37b71a 100644
--- a/cmd/erasure-encode_test.go
+++ b/cmd/erasure-encode_test.go
@@ -172,17 +172,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
 	buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
 	content := make([]byte, size)
 
-	for i := 0; i < dataDown; i++ {
+	for i := range dataDown {
 		disks[i] = OfflineDisk
 	}
 	for i := data; i < data+parityDown; i++ {
 		disks[i] = OfflineDisk
 	}
 
-	b.ResetTimer()
 	b.SetBytes(size)
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		writers := make([]io.Writer, len(disks))
 		for i, disk := range disks {
 			if disk == OfflineDisk {
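The benchmark hunks above adopt `testing.B.Loop` (Go 1.24): the first `b.Loop()` call resets the timer itself, replacing the `b.ResetTimer()` + `for i := 0; i < b.N; i++` idiom; when an index is still needed, the diff keeps it via `for i := 0; b.Loop(); i++`. A minimal sketch (the benchmark itself is hypothetical; only the loop structure mirrors the diff):

```go
package bench_test

import (
	"crypto/sha256"
	"testing"
)

// BenchmarkHash is hypothetical; the loop structure mirrors the
// conversions in this diff.
func BenchmarkHash(b *testing.B) {
	data := make([]byte, 1024)
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
	// Replaces: b.ResetTimer(); for i := 0; i < b.N; i++ { ... }
	// The first b.Loop() call resets the timer itself.
	for b.Loop() {
		sha256.Sum256(data)
	}
}
```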
if i > 12 { @@ -419,7 +418,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { } for i, test := range testCases { - test := test t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { _, err := obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) @@ -753,7 +751,7 @@ func TestCommonParities(t *testing.T) { } for idx, test := range tests { var metaArr []FileInfo - for i := 0; i < 12; i++ { + for i := range 12 { fi := test.fi1 if i%2 == 0 { fi = test.fi2 diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go index 09337a175..0cece0df5 100644 --- a/cmd/erasure-healing.go +++ b/cmd/erasure-healing.go @@ -116,7 +116,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string, func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error { g := errgroup.WithNErrs(len(storageDisks)) for index := range storageDisks { - index := index g.Go(func() error { if storageDisks[index] == nil { // we ignore disk not found errors diff --git a/cmd/erasure-healing_test.go b/cmd/erasure-healing_test.go index 5cf4750d6..c19fddd02 100644 --- a/cmd/erasure-healing_test.go +++ b/cmd/erasure-healing_test.go @@ -296,7 +296,6 @@ func TestIsObjectDangling(t *testing.T) { // Add new cases as seen } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.name, func(t *testing.T) { gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs) if !gotMeta.Equals(testCase.expectedMeta) { diff --git a/cmd/erasure-metadata-utils.go b/cmd/erasure-metadata-utils.go index 067d3609b..1409d99ec 100644 --- a/cmd/erasure-metadata-utils.go +++ b/cmd/erasure-metadata-utils.go @@ -204,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string, g := errgroup.WithNErrs(len(disks)) // Read `xl.meta` in parallel across disks. for index := range disks { - index := index g.Go(func() (err error) { if disks[index] == nil { return errDiskNotFound diff --git a/cmd/erasure-metadata-utils_test.go b/cmd/erasure-metadata-utils_test.go index 22c28e2e7..b14cc25df 100644 --- a/cmd/erasure-metadata-utils_test.go +++ b/cmd/erasure-metadata-utils_test.go @@ -55,7 +55,7 @@ func TestDiskCount(t *testing.T) { // of errors into a single maximal error with in the list. func TestReduceErrs(t *testing.T) { canceledErrs := make([]error, 0, 5) - for i := 0; i < 5; i++ { + for i := range 5 { canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled)) } // List all of all test cases to validate various cases of reduce errors. @@ -222,7 +222,7 @@ func Test_hashOrder(t *testing.T) { var tmp [16]byte rng.Read(tmp[:]) prefix := hex.EncodeToString(tmp[:]) - for i := 0; i < 10000; i++ { + for range 10000 { rng.Read(tmp[:]) y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x) diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go index ce053a07d..b812980b5 100644 --- a/cmd/erasure-metadata.go +++ b/cmd/erasure-metadata.go @@ -408,7 +408,6 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc // Start writing `xl.meta` to all disks in parallel. 
for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound diff --git a/cmd/erasure-metadata_test.go b/cmd/erasure-metadata_test.go index 1e175ab8a..76ee2e102 100644 --- a/cmd/erasure-metadata_test.go +++ b/cmd/erasure-metadata_test.go @@ -189,7 +189,7 @@ func TestFindFileInfoInQuorum(t *testing.T) { commonNumVersions := 2 numVersionsInQuorum := make([]int, 16) numVersionsNoQuorum := make([]int, 16) - for i := 0; i < 16; i++ { + for i := range 16 { if i < 4 { continue } @@ -269,7 +269,6 @@ func TestFindFileInfoInQuorum(t *testing.T) { } for _, test := range tests { - test := test t.Run("", func(t *testing.T) { fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum) _, ok1 := err.(InsufficientReadQuorum) @@ -316,7 +315,7 @@ func TestTransitionInfoEquals(t *testing.T) { } var i uint - for i = 0; i < 8; i++ { + for i = range uint(8) { fi := FileInfo{ TransitionTier: inputs[0].tier, TransitionedObjName: inputs[0].remoteObjName, diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go index 5a866631c..0a2f527b7 100644 --- a/cmd/erasure-multipart.go +++ b/cmd/erasure-multipart.go @@ -322,7 +322,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec uploads = append(uploads, MultipartInfo{ Bucket: bucket, Object: object, - UploadID: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))), + UploadID: base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadID)), Initiated: startTime, }) populatedUploadIDs.Add(uploadID) @@ -498,7 +498,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, partsMetadata[index].Metadata = userDefined } uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano()) - uploadID := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadUUID))) + uploadID := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadUUID)) uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID) // Write updated `xl.meta` to all disks. @@ -540,7 +540,6 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src // Rename file on all underlying storage disks. for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -820,7 +819,6 @@ func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI objectParts := make([][]string, len(onlineDisks)) // List uploaded parts from drives. for index := range onlineDisks { - index := index g.Go(func() (err error) { if onlineDisks[index] == nil { return errDiskNotFound @@ -995,7 +993,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP objectPartInfos := make([][]*ObjectPartInfo, len(disks)) // Rename file on all underlying storage disks. 
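// A minimal sketch (made-up IDs) of the fmt.Appendf rewrite used for the
// multipart UploadID above: fmt.Appendf (Go 1.19) formats directly into a
// byte slice, removing the intermediate string that
// []byte(fmt.Sprintf(...)) allocates before conversion.
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	deploymentID := "6c1a7bd2" // hypothetical values, not real MinIO IDs
	uploadUUID := "f3b9d1c4"

	// Old: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", ...)))
	uploadID := base64.RawURLEncoding.EncodeToString(
		fmt.Appendf(nil, "%s.%s", deploymentID, uploadUUID))
	fmt.Println(uploadID)
}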
for index := range disks { - index := index g.Go(func() (err error) { if disks[index] == nil { return errDiskNotFound diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index f103fc440..76e4d5269 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "path" "runtime" @@ -542,7 +543,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st disks := er.getDisks() g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -575,7 +575,6 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object rawFileInfos := make([]RawFileInfo, len(disks)) g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() (err error) { if disks[index] == nil { return errDiskNotFound @@ -1029,7 +1028,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str dataDirs := make([]string, len(disks)) // Rename file on all underlying storage disks. for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -1631,7 +1629,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -1836,7 +1833,6 @@ func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object } g := errgroup.WithNErrs(len(onlineDisks)) for index := range onlineDisks { - index := index g.Go(func() error { if onlineDisks[index] == nil { return nil @@ -1862,7 +1858,6 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return nil @@ -2222,9 +2217,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s return ObjectInfo{}, err } } - for k, v := range objInfo.UserDefined { - fi.Metadata[k] = v - } + maps.Copy(fi.Metadata, objInfo.UserDefined) fi.ModTime = opts.MTime fi.VersionID = opts.VersionID @@ -2294,9 +2287,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin fi.Metadata[xhttp.AmzObjectTagging] = tags fi.ReplicationState = opts.PutReplicationState() - for k, v := range opts.UserDefined { - fi.Metadata[k] = v - } + maps.Copy(fi.Metadata, opts.UserDefined) if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) @@ -2314,7 +2305,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o // Start writing `xl.meta` to all disks in parallel. 
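// A minimal sketch of the maps.Copy rewrites above (PutObjectMetadata,
// PutObjectTags, and the earlier IAM hunks): maps.Copy (Go 1.21) inserts
// every key/value of src into dst, overwriting existing keys, which is
// exactly what the hand-written range loops it replaces did.
package main

import (
	"fmt"
	"maps"
)

func main() {
	// Hypothetical stand-ins for fi.Metadata and objInfo.UserDefined.
	metadata := map[string]string{"content-type": "text/plain"}
	userDefined := map[string]string{
		"content-type":   "application/json",
		"x-amz-meta-app": "demo",
	}
	maps.Copy(metadata, userDefined)
	fmt.Println(metadata["content-type"]) // application/json: src wins on collisions
}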
for index := range onlineDisks { - index := index g.Go(func() error { if onlineDisks[index] == nil { return errDiskNotFound diff --git a/cmd/erasure-object_test.go b/cmd/erasure-object_test.go index 638644eb1..03f452f53 100644 --- a/cmd/erasure-object_test.go +++ b/cmd/erasure-object_test.go @@ -112,7 +112,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) { t.Fatalf("Erasure Object upload failed: %s", err) } for _, test := range testCases { - test := test t.Run("", func(t *testing.T) { _, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{}) if err != nil { @@ -625,7 +624,7 @@ func TestGetObjectNoQuorum(t *testing.T) { t.Fatal(err) } - for f := 0; f < 2; f++ { + for f := range 2 { diskErrors := make(map[int]error) for i := 0; i <= f; i++ { diskErrors[i] = nil @@ -774,7 +773,7 @@ func TestPutObjectNoQuorum(t *testing.T) { // in a 16 disk Erasure setup. The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,4) - for f := 0; f < 2; f++ { + for f := range 2 { diskErrors := make(map[int]error) for i := 0; i <= f; i++ { diskErrors[i] = nil @@ -837,7 +836,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) { // in a 16 disk Erasure setup. The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,2) - for f := 0; f < 2; f++ { + for f := range 2 { t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) { diskErrors := make(map[int]error) for i := 0; i <= f; i++ { @@ -1109,7 +1108,6 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin {parts7, errs7, 11, 11, parts7SC, nil}, } for _, tt := range tests { - tt := tt t.(*testing.T).Run("", func(t *testing.T) { globalStorageClass.Update(tt.storageClassCfg) actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks))) diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go index ac72f8244..5062fb371 100644 --- a/cmd/erasure-server-pool-decom.go +++ b/cmd/erasure-server-pool-decom.go @@ -25,6 +25,7 @@ import ( "io" "math/rand" "net/http" + "slices" "sort" "strings" "time" @@ -117,12 +118,7 @@ func (pd *PoolDecommissionInfo) bucketPop(bucket string) bool { } func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool { - for _, b := range pd.DecommissionedBuckets { - if b == bucket { - return true - } - } - return false + return slices.Contains(pd.DecommissionedBuckets, bucket) } func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) { @@ -792,8 +788,6 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool } for setIdx, set := range pool.sets { - set := set - filterLifecycle := func(bucket, object string, fi FileInfo) bool { if lc == nil { return false @@ -901,7 +895,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool } // gr.Close() is ensured by decommissionObject(). 
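// A minimal sketch of the slices.Contains rewrite in isBucketDecommissioned
// above: the generic helper (Go 1.21) performs the same linear scan,
// returning true on the first equal element. Bucket names are hypothetical.
package main

import (
	"fmt"
	"slices"
)

func main() {
	decommissioned := []string{"photos", "logs"}

	// Equivalent to: for _, b := range decommissioned { if b == name { return true } }
	fmt.Println(slices.Contains(decommissioned, "logs"))   // true
	fmt.Println(slices.Contains(decommissioned, "backup")) // false
}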
- for try := 0; try < 3; try++ { + for range 3 { if version.IsRemote() { if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{ VersionID: versionID, diff --git a/cmd/erasure-server-pool-decom_test.go b/cmd/erasure-server-pool-decom_test.go index 567e6b367..7f4c8ef17 100644 --- a/cmd/erasure-server-pool-decom_test.go +++ b/cmd/erasure-server-pool-decom_test.go @@ -176,7 +176,6 @@ func TestPoolMetaValidate(t *testing.T) { t.Parallel() for _, testCase := range testCases { - testCase := testCase t.Run(testCase.name, func(t *testing.T) { update, err := testCase.meta.validate(testCase.pools) if testCase.expectedErr { diff --git a/cmd/erasure-server-pool-rebalance.go b/cmd/erasure-server-pool-rebalance.go index 0c40ffb3e..f8078c3f2 100644 --- a/cmd/erasure-server-pool-rebalance.go +++ b/cmd/erasure-server-pool-rebalance.go @@ -580,8 +580,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, } for setIdx, set := range pool.sets { - set := set - filterLifecycle := func(bucket, object string, fi FileInfo) bool { if lc == nil { return false @@ -594,7 +592,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal) return true } - return false } @@ -689,7 +686,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, continue } - for try := 0; try < 3; try++ { + for range 3 { // GetObjectReader.Close is called by rebalanceObject gr, err := set.GetObjectNInfo(ctx, bucket, diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index d13539e06..3c301bef1 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -420,7 +420,6 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b nSets := make([]int, len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools)) for index := range z.serverPools { - index := index // Skip suspended pools or pools participating in rebalance for any new // I/O. 
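// A minimal sketch of Go 1.22's range-over-integer form used by the retry
// loops above: `for i := range n` counts 0..n-1, and `for range n` simply
// repeats n times when the counter is unused, as in the three decommission
// and rebalance attempts.
package main

import "fmt"

func main() {
	for i := range 3 {
		fmt.Println("attempt", i) // prints 0, 1, 2
	}
	for range 2 {
		fmt.Println("retry") // counter unused, body repeats twice
	}
}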
if z.IsSuspended(index) || z.IsPoolRebalancing(index) { @@ -660,7 +659,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error { g := errgroup.WithNErrs(len(z.serverPools)) for index := range z.serverPools { - index := index g.Go(func() error { return z.serverPools[index].Shutdown(ctx) }, index) @@ -712,7 +710,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) storageInfos := make([]StorageInfo, len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools)) for index := range z.serverPools { - index := index g.Go(func() error { storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics) return nil @@ -1268,7 +1265,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools)) for i, pool := range z.serverPools { - i := i pool := pool eg.Go(func() error { dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts) @@ -2244,7 +2240,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re for poolIdx, erasureSet := range z.serverPools { for setIdx, set := range erasureSet.sets { - set := set listOut := make(chan metaCacheEntry, 1) entries = append(entries, listOut) disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true) diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index fafc2d6fc..9ceb80856 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -392,7 +392,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ var lk sync.Mutex for i := range setCount { lockerEpSet := set.NewStringSet() - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { wg.Add(1) go func(i int, endpoint Endpoint) { defer wg.Done() @@ -415,7 +415,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ defer wg.Done() var innerWg sync.WaitGroup - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { disk := storageDisks[i*setDriveCount+j] if disk == nil { continue @@ -593,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo { g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { - index := index g.Go(func() error { storageInfos[index] = s.sets[index].StorageInfo(ctx) return nil @@ -618,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { - index := index g.Go(func() error { storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics) return nil @@ -641,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error { g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { - index := index g.Go(func() error { return s.sets[index].Shutdown(ctx) }, index) @@ -705,7 +702,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) { func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error { g := errgroup.WithNErrs(len(storageDisks)) for index := range storageDisks { - index := index g.Go(func() error { if storageDisks[index] == nil { // we ignore disk not found errors diff --git a/cmd/erasure-sets_test.go b/cmd/erasure-sets_test.go index d457f2669..63311d557 100644 --- a/cmd/erasure-sets_test.go +++ b/cmd/erasure-sets_test.go @@ -40,13 +40,12 @@ func BenchmarkCrcHash(b *testing.B) { {1024}, } for _, testCase := range cases { - testCase := 
testCase key := randString(testCase.key) b.Run("", func(b *testing.B) { b.SetBytes(1024) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { crcHashMod(key, 16) } }) @@ -65,13 +64,12 @@ func BenchmarkSipHash(b *testing.B) { {1024}, } for _, testCase := range cases { - testCase := testCase key := randString(testCase.key) b.Run("", func(b *testing.B) { b.SetBytes(1024) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { sipHashMod(key, 16, testUUID) } }) @@ -164,7 +162,7 @@ func TestNewErasureSets(t *testing.T) { nDisks := 16 // Maximum disks. var erasureDisks []string - for i := 0; i < nDisks; i++ { + for range nDisks { // Do not attempt to create this path, the test validates // so that newErasureSets initializes non existing paths // and successfully returns initialized object layer. diff --git a/cmd/erasure.go b/cmd/erasure.go index cb8662e0d..faae87bd9 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "maps" "math/rand" "os" "runtime" @@ -175,7 +176,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() error { di := madmin.Disk{ Endpoint: endpoints[index].String(), @@ -219,9 +219,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks di.Metrics.LastMinute[k] = v.asTimedAction() } } - for k, v := range info.Metrics.APICalls { - di.Metrics.APICalls[k] = v - } + maps.Copy(di.Metrics.APICalls, info.Metrics.APICalls) if info.Total > 0 { di.Utilization = float64(info.Used / info.Total * 100) } @@ -287,7 +285,6 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new infos := make([]DiskInfo, len(disks)) r := rand.New(rand.NewSource(time.Now().UnixNano())) for _, i := range r.Perm(len(disks)) { - i := i wg.Add(1) go func() { defer wg.Done() diff --git a/cmd/fmt-gen.go b/cmd/fmt-gen.go index 8e8739573..c616c4dc0 100644 --- a/cmd/fmt-gen.go +++ b/cmd/fmt-gen.go @@ -99,7 +99,7 @@ func fmtGenMain(ctxt *cli.Context) { format := newFormatErasureV3(setCount, setDriveCount) format.ID = deploymentID for i := range setCount { // for each erasure set - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] if deploymentID != "" { diff --git a/cmd/format-erasure.go b/cmd/format-erasure.go index 5cbcb495a..6ff6d9e99 100644 --- a/cmd/format-erasure.go +++ b/cmd/format-erasure.go @@ -159,7 +159,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 { for i := range numSets { format.Erasure.Sets[i] = make([]string, setLen) - for j := 0; j < setLen; j++ { + for j := range setLen { format.Erasure.Sets[i][j] = mustGetUUID() } } @@ -324,7 +324,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur // Load format from each disk in parallel for index := range storageDisks { - index := index g.Go(func() error { if storageDisks[index] == nil { return errDiskNotFound @@ -530,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format // Write `format.json` to all disks. 
for index := range storageDisks { - index := index g.Go(func() error { if formats[index] == nil { return errDiskNotFound @@ -566,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag storageDisks := make([]StorageAPI, len(endpoints)) g := errgroup.WithNErrs(len(endpoints)) for index := range endpoints { - index := index g.Go(func() (err error) { storageDisks[index], err = newStorageAPI(endpoints[index], opts) return err @@ -600,7 +597,6 @@ func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool { func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error { g := errgroup.WithNErrs(len(formats)) for i := range formats { - i := i g.Go(func() error { if formats[i] == nil || !endpoints[i].IsLocal { return nil @@ -641,7 +637,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, for i := range setCount { hostCount := make(map[string]int, setDriveCount) - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { disk := storageDisks[i*setDriveCount+j] newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] @@ -662,7 +658,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, return } logger.Info(" * Set %v:", i+1) - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { disk := storageDisks[i*setDriveCount+j] logger.Info(" - Drive: %s", disk.String()) } diff --git a/cmd/format-erasure_test.go b/cmd/format-erasure_test.go index 6cc1c179a..732f6728e 100644 --- a/cmd/format-erasure_test.go +++ b/cmd/format-erasure_test.go @@ -48,7 +48,7 @@ func TestFixFormatV3(t *testing.T) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 8) - for j := 0; j < 8; j++ { + for j := range 8 { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat @@ -79,7 +79,7 @@ func TestFormatErasureEmpty(t *testing.T) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 16) - for j := 0; j < 16; j++ { + for j := range 16 { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat @@ -276,8 +276,8 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 32) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat @@ -390,18 +390,17 @@ func BenchmarkGetFormatErasureInQuorumOld(b *testing.B) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 15*200) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat } } - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = getFormatErasureInQuorumOld(formats) } } @@ -414,18 +413,17 @@ func BenchmarkGetFormatErasureInQuorum(b *testing.B) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 15*200) - for i := 0; i < setCount; i++ { - for j := 
0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat } } - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = getFormatErasureInQuorum(formats) } } @@ -440,8 +438,8 @@ func TestNewFormatSets(t *testing.T) { formats := make([]*formatErasureV3, 32) errs := make([]error, 32) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat diff --git a/cmd/ftp-server-driver.go b/cmd/ftp-server-driver.go index 88571e203..7f21eeb8d 100644 --- a/cmd/ftp-server-driver.go +++ b/cmd/ftp-server-driver.go @@ -98,7 +98,7 @@ func (m *minioFileInfo) IsDir() bool { return m.isDir } -func (m *minioFileInfo) Sys() interface{} { +func (m *minioFileInfo) Sys() any { return nil } @@ -316,7 +316,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) if err != nil { return nil, err } - claims := make(map[string]interface{}) + claims := make(map[string]any) claims[expClaim] = UTCNow().Add(expiryDur).Unix() claims[ldapUser] = lookupResult.NormDN diff --git a/cmd/ftp-server.go b/cmd/ftp-server.go index a7b2841b0..562336e33 100644 --- a/cmd/ftp-server.go +++ b/cmd/ftp-server.go @@ -33,14 +33,14 @@ var globalRemoteFTPClientTransport = NewRemoteTargetHTTPTransport(true)() type minioLogger struct{} // Print implement Logger -func (log *minioLogger) Print(sessionID string, message interface{}) { +func (log *minioLogger) Print(sessionID string, message any) { if serverDebugLog { fmt.Printf("%s %s\n", sessionID, message) } } // Printf implement Logger -func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) { +func (log *minioLogger) Printf(sessionID string, format string, v ...any) { if serverDebugLog { if sessionID != "" { fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...)) diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index 55986fdd8..88a111668 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -23,6 +23,7 @@ import ( "net/http" "path" "runtime/debug" + "slices" "strings" "sync/atomic" "time" @@ -396,18 +397,16 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler { if k == "delimiter" { // delimiters are allowed to have `.` or `..` continue } - for _, v := range vv { - if hasBadPathComponent(v) { - if ok { - tc.FuncName = "handler.ValidRequest" - tc.ResponseRecorder.LogErrBody = true - } - - defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r)) - writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL) - atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1) - return + if slices.ContainsFunc(vv, hasBadPathComponent) { + if ok { + tc.FuncName = "handler.ValidRequest" + tc.ResponseRecorder.LogErrBody = true } + + defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r)) + writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL) + atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1) + return } } if hasMultipleAuth(r) { diff --git a/cmd/generic-handlers_test.go b/cmd/generic-handlers_test.go index 03813ebc0..c2d71ae9d 100644 --- a/cmd/generic-handlers_test.go +++ b/cmd/generic-handlers_test.go @@ -90,7 +90,7 @@ var isHTTPHeaderSizeTooLargeTests 
= []struct { func generateHeader(size, usersize int) http.Header { header := http.Header{} - for i := 0; i < size; i++ { + for i := range size { header.Set(strconv.Itoa(i), "") } userlength := 0 @@ -136,7 +136,6 @@ var containsReservedMetadataTests = []struct { func TestContainsReservedMetadata(t *testing.T) { for _, test := range containsReservedMetadataTests { - test := test t.Run("", func(t *testing.T) { contains := containsReservedMetadata(test.header) if contains && !test.shouldFail { @@ -201,7 +200,7 @@ func Benchmark_hasBadPathComponent(t *testing.B) { t.Run(tt.name, func(b *testing.B) { b.SetBytes(int64(len(tt.input))) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { if got := hasBadPathComponent(tt.input); got != tt.want { t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want) } diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index f32574870..17824bdbf 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -292,7 +292,7 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) return contentEnc } var newEncs []string - for _, enc := range strings.Split(contentEnc, ",") { + for enc := range strings.SplitSeq(contentEnc, ",") { if enc != streamingContentEncoding { newEncs = append(newEncs, enc) } diff --git a/cmd/httprange.go b/cmd/httprange.go index d6b51f72d..80e64f769 100644 --- a/cmd/httprange.go +++ b/cmd/httprange.go @@ -54,10 +54,7 @@ func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err er case h.IsSuffixLength: specifiedLen := -h.Start - rangeLength = specifiedLen - if specifiedLen > resourceSize { - rangeLength = resourceSize - } + rangeLength = min(specifiedLen, resourceSize) case h.Start >= resourceSize: return 0, InvalidRange{ @@ -98,10 +95,7 @@ func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64 start = h.Start if h.IsSuffixLength { - start = resourceSize + h.Start - if start < 0 { - start = 0 - } + start = max(resourceSize+h.Start, 0) } return start, length, nil } diff --git a/cmd/iam-etcd-store.go b/cmd/iam-etcd-store.go index 16a3df5b3..850606fd5 100644 --- a/cmd/iam-etcd-store.go +++ b/cmd/iam-etcd-store.go @@ -98,7 +98,7 @@ func (ies *IAMEtcdStore) getUsersSysType() UsersSysType { return ies.usersSysType } -func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error { +func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item any, itemPath string, opts ...options) error { data, err := json.Marshal(item) if err != nil { return err @@ -114,7 +114,7 @@ func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, it return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...) 
} -func getIAMConfig(item interface{}, data []byte, itemPath string) error { +func getIAMConfig(item any, data []byte, itemPath string) error { data, err := decryptData(data, itemPath) if err != nil { return err @@ -123,7 +123,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error { return json.Unmarshal(data, item) } -func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error { +func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item any, path string) error { data, err := readKeyEtcd(ctx, ies.client, path) if err != nil { return err diff --git a/cmd/iam-object-store.go b/cmd/iam-object-store.go index f519e9059..89931148d 100644 --- a/cmd/iam-object-store.go +++ b/cmd/iam-object-store.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "maps" "path" "strings" "sync" @@ -80,7 +81,7 @@ func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType { return iamOS.usersSysType } -func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error { +func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item any, objPath string, opts ...options) error { json := jsoniter.ConfigCompatibleWithStandardLibrary data, err := json.Marshal(item) if err != nil { @@ -135,7 +136,7 @@ func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context, return data, meta, nil } -func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error { +func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item any, objPath string) error { data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath) if err != nil { return err @@ -294,7 +295,6 @@ func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IA g := errgroup.WithNErrs(len(users)) for index := range users { - index := index g.Go(func() error { userName := path.Dir(users[index]) user, err := iamOS.loadUserIdentity(ctx, userName, userType) @@ -413,7 +413,6 @@ func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, use g := errgroup.WithNErrs(len(users)) for index := range users { - index := index g.Go(func() error { userName := strings.TrimSuffix(users[index], ".json") userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup) @@ -538,7 +537,6 @@ func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, polici g := errgroup.WithNErrs(len(policies)) for index := range policies { - index := index g.Go(func() error { policyName := path.Dir(policies[index]) policyDoc, err := iamOS.loadPolicy(ctx, policyName) @@ -776,9 +774,7 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam } // Copy svcUsersMap to cache.iamUsersMap - for k, v := range svcUsersMap { - cache.iamUsersMap[k] = v - } + maps.Copy(cache.iamUsersMap, svcUsersMap) cache.buildUserGroupMemberships() diff --git a/cmd/iam-store.go b/cmd/iam-store.go index 90e615519..c7e4ad492 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "path" "sort" "strings" @@ -159,7 +160,7 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string type UserIdentity struct { Version int `json:"version"` Credentials auth.Credentials `json:"credentials"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt"` } func newUserIdentity(cred auth.Credentials) UserIdentity { @@ -171,7 +172,7 @@ type 
GroupInfo struct { Version int `json:"version"` Status string `json:"status"` Members []string `json:"members"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt"` } func newGroupInfo(members []string) GroupInfo { @@ -182,7 +183,7 @@ func newGroupInfo(members []string) GroupInfo { type MappedPolicy struct { Version int `json:"version"` Policies string `json:"policy"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt"` } // mappedPoliciesToMap copies the map of mapped policies to a regular map. @@ -198,7 +199,7 @@ func mappedPoliciesToMap(m *xsync.MapOf[string, MappedPolicy]) map[string]Mapped // converts a mapped policy into a slice of distinct policies func (mp MappedPolicy) toSlice() []string { var policies []string - for _, policy := range strings.Split(mp.Policies, ",") { + for policy := range strings.SplitSeq(mp.Policies, ",") { if strings.TrimSpace(policy) == "" { continue } @@ -219,8 +220,8 @@ func newMappedPolicy(policy string) MappedPolicy { type PolicyDoc struct { Version int `json:",omitempty"` Policy policy.Policy - CreateDate time.Time `json:",omitempty"` - UpdateDate time.Time `json:",omitempty"` + CreateDate time.Time + UpdateDate time.Time } func newPolicyDoc(p policy.Policy) PolicyDoc { @@ -400,7 +401,6 @@ func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool, g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time. for index := range groups { - index := index g.Go(func() error { err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap) if err != nil && !errors.Is(err, errNoSuchPolicy) { @@ -610,8 +610,8 @@ type IAMStorageAPI interface { loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error - saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error - loadIAMConfig(ctx context.Context, item interface{}, path string) error + saveIAMConfig(ctx context.Context, item any, path string, opts ...options) error + loadIAMConfig(ctx context.Context, item any, path string) error deleteIAMConfig(ctx context.Context, path string) error savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error @@ -839,7 +839,7 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string, return policies, nil } if store.policy != nil { - val, err, _ := store.policy.Do(name, func() (interface{}, error) { + val, err, _ := store.policy.Do(name, func() (any, error) { return getPolicies() }) if err != nil { @@ -1614,9 +1614,7 @@ func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Polic } cache := store.lock() - for policy, p := range m { - cache.iamPolicyDocsMap[policy] = p - } + maps.Copy(cache.iamPolicyDocsMap, m) store.unlock() for policy, p := range m { @@ -2909,7 +2907,7 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error { groupLoad := 
env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn - newCachePopulate := func() (val interface{}, err error) { + newCachePopulate := func() (val any, err error) { newCache := newIamCache() // Check for service account first @@ -2975,7 +2973,7 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error } var ( - val interface{} + val any err error ) if store.group != nil { @@ -3007,30 +3005,20 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error return true }) - for k, v := range newCache.iamGroupsMap { - cache.iamGroupsMap[k] = v - } + maps.Copy(cache.iamGroupsMap, newCache.iamGroupsMap) - for k, v := range newCache.iamPolicyDocsMap { - cache.iamPolicyDocsMap[k] = v - } + maps.Copy(cache.iamPolicyDocsMap, newCache.iamPolicyDocsMap) - for k, v := range newCache.iamUserGroupMemberships { - cache.iamUserGroupMemberships[k] = v - } + maps.Copy(cache.iamUserGroupMemberships, newCache.iamUserGroupMemberships) newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool { cache.iamUserPolicyMap.Store(k, v) return true }) - for k, v := range newCache.iamUsersMap { - cache.iamUsersMap[k] = v - } + maps.Copy(cache.iamUsersMap, newCache.iamUsersMap) - for k, v := range newCache.iamSTSAccountsMap { - cache.iamSTSAccountsMap[k] = v - } + maps.Copy(cache.iamSTSAccountsMap, newCache.iamSTSAccountsMap) newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool { cache.iamSTSPolicyMap.Store(k, v) diff --git a/cmd/iam.go b/cmd/iam.go index 07a5041f4..c2f262143 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -1056,7 +1056,7 @@ type newServiceAccountOpts struct { expiration *time.Time allowSiteReplicatorAccount bool // allow creating internal service account for site-replication. - claims map[string]interface{} + claims map[string]any } // NewServiceAccount - create a new service account @@ -1099,7 +1099,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount { return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed } - m := make(map[string]interface{}) + m := make(map[string]any) m[parentClaim] = parentUser if len(policyBuf) > 0 { @@ -1345,7 +1345,7 @@ func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) ( } // GetClaimsForSvcAcc - gets the claims associated with the service account. 
-func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) { +func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]any, error) { if !sys.Initialized() { return nil, errServerNotInitialized } @@ -1696,10 +1696,8 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...)) } - for k, v := range updatedKeysMap { - // Replace the map values with the updated ones - accessKeyMap[k] = v - } + // Replace the map values with the updated ones + maps.Copy(accessKeyMap, updatedKeysMap) return skippedAccessKeys, nil } diff --git a/cmd/jwt.go b/cmd/jwt.go index d0faaf8ec..c86b8f676 100644 --- a/cmd/jwt.go +++ b/cmd/jwt.go @@ -19,6 +19,7 @@ package cmd import ( "errors" + "maps" "net/http" "time" @@ -110,9 +111,7 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b return nil, nil, false, errAuthentication } - for k, v := range eclaims { - claims.MapClaims[k] = v - } + maps.Copy(claims.MapClaims, eclaims) // if root access is disabled, disable all its service accounts and temporary credentials. if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() { diff --git a/cmd/jwt_test.go b/cmd/jwt_test.go index ac4710a49..a45ce35cb 100644 --- a/cmd/jwt_test.go +++ b/cmd/jwt_test.go @@ -175,7 +175,7 @@ func BenchmarkAuthenticateNode(b *testing.B) { fn := authenticateNode b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { fn(creds.AccessKey, creds.SecretKey) } }) @@ -183,7 +183,7 @@ func BenchmarkAuthenticateNode(b *testing.B) { fn := newCachedAuthToken() b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { fn() } }) diff --git a/cmd/leak-detect_test.go b/cmd/leak-detect_test.go index bc37414fd..e72f4e3e0 100644 --- a/cmd/leak-detect_test.go +++ b/cmd/leak-detect_test.go @@ -139,7 +139,7 @@ func pickRelevantGoroutines() (gs []string) { // get runtime stack buffer. buf := debug.Stack() // runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" . - for _, g := range strings.Split(string(buf), "\n\n") { + for g := range strings.SplitSeq(string(buf), "\n\n") { // Again split on a new line, the first line of the second half contains the info about the go routine. sl := strings.SplitN(g, "\n", 2) if len(sl) != 2 { diff --git a/cmd/local-locker.go b/cmd/local-locker.go index e5904d508..81a012b98 100644 --- a/cmd/local-locker.go +++ b/cmd/local-locker.go @@ -329,7 +329,7 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep lris, ok := l.lockMap[resource] if !ok { // Just to be safe, delete uuids. 
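// A minimal sketch (hypothetical policy string) of the strings.SplitSeq
// rewrites seen above in toSlice and pickRelevantGoroutines: SplitSeq
// (Go 1.24) returns an iterator over the substrings, avoiding the []string
// allocation of strings.Split when the caller only ranges over the parts.
package main

import (
	"fmt"
	"strings"
)

func main() {
	for policy := range strings.SplitSeq("readonly, writeonly, diagnostics", ",") {
		fmt.Println(strings.TrimSpace(policy))
	}
}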
- for idx := 0; idx < maxDeleteList; idx++ { + for idx := range maxDeleteList { mapID := formatUUID(uid, idx) if _, ok := l.lockUID[mapID]; !ok { break diff --git a/cmd/local-locker_test.go b/cmd/local-locker_test.go index 300a7a1b6..674cf07b6 100644 --- a/cmd/local-locker_test.go +++ b/cmd/local-locker_test.go @@ -279,12 +279,12 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) { } t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) { l := newLocker() - for i := 0; i < locks; i++ { + for range locks { var tmp [16]byte rng.Read(tmp[:]) res := []string{hex.EncodeToString(tmp[:])} - for i := 0; i < readers; i++ { + for range readers { rng.Read(tmp[:]) ok, err := l.RLock(t.Context(), dsync.LockArgs{ UID: uuid.NewString(), @@ -366,12 +366,12 @@ func Test_localLocker_RUnlock(t *testing.T) { } t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) { l := newLocker() - for i := 0; i < locks; i++ { + for range locks { var tmp [16]byte rng.Read(tmp[:]) res := []string{hex.EncodeToString(tmp[:])} - for i := 0; i < readers; i++ { + for range readers { rng.Read(tmp[:]) ok, err := l.RLock(t.Context(), dsync.LockArgs{ UID: uuid.NewString(), diff --git a/cmd/logging.go b/cmd/logging.go index d956406c6..9c24ae22d 100644 --- a/cmd/logging.go +++ b/cmd/logging.go @@ -8,211 +8,211 @@ import ( "github.com/minio/minio/internal/logger" ) -func proxyLogIf(ctx context.Context, err error, errKind ...interface{}) { +func proxyLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "proxy", err, errKind...) } -func replLogIf(ctx context.Context, err error, errKind ...interface{}) { +func replLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "replication", err, errKind...) } -func replLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func replLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "replication", err, id, errKind...) } -func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "iam", err, id, errKind...) } -func iamLogIf(ctx context.Context, err error, errKind ...interface{}) { +func iamLogIf(ctx context.Context, err error, errKind ...any) { if !errors.Is(err, grid.ErrDisconnected) { logger.LogIf(ctx, "iam", err, errKind...) } } -func iamLogEvent(ctx context.Context, msg string, args ...interface{}) { +func iamLogEvent(ctx context.Context, msg string, args ...any) { logger.Event(ctx, "iam", msg, args...) } -func rebalanceLogIf(ctx context.Context, err error, errKind ...interface{}) { +func rebalanceLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "rebalance", err, errKind...) } -func rebalanceLogEvent(ctx context.Context, msg string, args ...interface{}) { +func rebalanceLogEvent(ctx context.Context, msg string, args ...any) { logger.Event(ctx, "rebalance", msg, args...) } -func adminLogIf(ctx context.Context, err error, errKind ...interface{}) { +func adminLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "admin", err, errKind...) } -func authNLogIf(ctx context.Context, err error, errKind ...interface{}) { +func authNLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "authN", err, errKind...) 
} -func authZLogIf(ctx context.Context, err error, errKind ...interface{}) { +func authZLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "authZ", err, errKind...) } -func peersLogIf(ctx context.Context, err error, errKind ...interface{}) { +func peersLogIf(ctx context.Context, err error, errKind ...any) { if !errors.Is(err, grid.ErrDisconnected) { logger.LogIf(ctx, "peers", err, errKind...) } } -func peersLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) { +func peersLogAlwaysIf(ctx context.Context, err error, errKind ...any) { if !errors.Is(err, grid.ErrDisconnected) { logger.LogAlwaysIf(ctx, "peers", err, errKind...) } } -func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { if !errors.Is(err, grid.ErrDisconnected) { logger.LogOnceIf(ctx, "peers", err, id, errKind...) } } -func bugLogIf(ctx context.Context, err error, errKind ...interface{}) { +func bugLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "internal", err, errKind...) } -func healingLogIf(ctx context.Context, err error, errKind ...interface{}) { +func healingLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "healing", err, errKind...) } -func healingLogEvent(ctx context.Context, msg string, args ...interface{}) { +func healingLogEvent(ctx context.Context, msg string, args ...any) { logger.Event(ctx, "healing", msg, args...) } -func healingLogOnceIf(ctx context.Context, err error, errKind ...interface{}) { +func healingLogOnceIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "healing", err, errKind...) } -func batchLogIf(ctx context.Context, err error, errKind ...interface{}) { +func batchLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "batch", err, errKind...) } -func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "batch", err, id, errKind...) } -func bootLogIf(ctx context.Context, err error, errKind ...interface{}) { +func bootLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "bootstrap", err, errKind...) } -func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...) } -func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) { +func dnsLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "dns", err, errKind...) } -func internalLogIf(ctx context.Context, err error, errKind ...interface{}) { +func internalLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "internal", err, errKind...) } -func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "internal", err, id, errKind...) } -func transitionLogIf(ctx context.Context, err error, errKind ...interface{}) { +func transitionLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "transition", err, errKind...) 
} -func configLogIf(ctx context.Context, err error, errKind ...interface{}) { +func configLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "config", err, errKind...) } -func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "config", err, id, errKind...) } -func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...) } -func scannerLogIf(ctx context.Context, err error, errKind ...interface{}) { +func scannerLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "scanner", err, errKind...) } -func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "scanner", err, id, errKind...) } -func ilmLogIf(ctx context.Context, err error, errKind ...interface{}) { +func ilmLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "ilm", err, errKind...) } -func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "ilm", err, id, errKind...) } -func encLogIf(ctx context.Context, err error, errKind ...interface{}) { +func encLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "encryption", err, errKind...) } -func encLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func encLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "encryption", err, id, errKind...) } -func storageLogIf(ctx context.Context, err error, errKind ...interface{}) { +func storageLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "storage", err, errKind...) } -func storageLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) { +func storageLogAlwaysIf(ctx context.Context, err error, errKind ...any) { logger.LogAlwaysIf(ctx, "storage", err, errKind...) } -func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "storage", err, id, errKind...) } -func decomLogIf(ctx context.Context, err error, errKind ...interface{}) { +func decomLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "decom", err, errKind...) } -func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "decom", err, id, errKind...) } -func decomLogEvent(ctx context.Context, msg string, args ...interface{}) { +func decomLogEvent(ctx context.Context, msg string, args ...any) { logger.Event(ctx, "decom", msg, args...) } -func etcdLogIf(ctx context.Context, err error, errKind ...interface{}) { +func etcdLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "etcd", err, errKind...) 
} -func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "etcd", err, id, errKind...) } -func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) { +func metricsLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "metrics", err, errKind...) } -func s3LogIf(ctx context.Context, err error, errKind ...interface{}) { +func s3LogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "s3", err, errKind...) } -func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "sftp", err, id, errKind...) } -func shutdownLogIf(ctx context.Context, err error, errKind ...interface{}) { +func shutdownLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "shutdown", err, errKind...) } -func stsLogIf(ctx context.Context, err error, errKind ...interface{}) { +func stsLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "sts", err, errKind...) } -func tierLogIf(ctx context.Context, err error, errKind ...interface{}) { +func tierLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "tier", err, errKind...) } -func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) { +func kmsLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "kms", err, errKind...) } @@ -220,11 +220,11 @@ func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) { type KMSLogger struct{} // LogOnceIf is the implementation of LogOnceIf, accessible using the Logger interface -func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "kms", err, id, errKind...) } // LogIf is the implementation of LogIf, accessible using the Logger interface -func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...interface{}) { +func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "kms", err, errKind...) } diff --git a/cmd/metacache-bucket.go b/cmd/metacache-bucket.go index 821db5b4d..4df23d825 100644 --- a/cmd/metacache-bucket.go +++ b/cmd/metacache-bucket.go @@ -20,6 +20,7 @@ package cmd import ( "context" "errors" + "maps" "runtime/debug" "sort" "sync" @@ -70,7 +71,7 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache { } } -func (b *bucketMetacache) debugf(format string, data ...interface{}) { +func (b *bucketMetacache) debugf(format string, data ...any) { if serverDebugLog { console.Debugf(format+"\n", data...) 
} @@ -195,9 +196,7 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri b.mu.RLock() defer b.mu.RUnlock() dst := make(map[string]metacache, len(b.caches)) - for k, v := range b.caches { - dst[k] = v - } + maps.Copy(dst, b.caches) // Copy indexes dst2 := make(map[string][]string, len(b.cachesRoot)) for k, v := range b.cachesRoot { diff --git a/cmd/metacache-bucket_test.go b/cmd/metacache-bucket_test.go index 6676201b9..768d78538 100644 --- a/cmd/metacache-bucket_test.go +++ b/cmd/metacache-bucket_test.go @@ -33,7 +33,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) { for i := range pathNames[:] { pathNames[i] = fmt.Sprintf("prefix/%d", i) } - for i := 0; i < elements; i++ { + for i := range elements { bm.findCache(listPathOptions{ ID: mustGetUUID(), Bucket: "", @@ -49,8 +49,8 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) { }) } b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for i := 0; b.Loop(); i++ { bm.findCache(listPathOptions{ ID: mustGetUUID(), Bucket: "", diff --git a/cmd/metacache-entries_test.go b/cmd/metacache-entries_test.go index 3857724d8..75af4d0f2 100644 --- a/cmd/metacache-entries_test.go +++ b/cmd/metacache-entries_test.go @@ -633,7 +633,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) { for testID, tt := range tests { rng := rand.New(rand.NewSource(0)) // Run for a number of times, shuffling the input to ensure that output is consistent. - for i := 0; i < 10; i++ { + for i := range 10 { t.Run(fmt.Sprintf("test-%d-%s-run-%d", testID, tt.name, i), func(t *testing.T) { if i > 0 { rng.Shuffle(len(tt.m), func(i, j int) { diff --git a/cmd/metacache-marker.go b/cmd/metacache-marker.go index d85cbab56..4548af2b9 100644 --- a/cmd/metacache-marker.go +++ b/cmd/metacache-marker.go @@ -38,8 +38,8 @@ func (o *listPathOptions) parseMarker() { o.Marker = s[:start] end := strings.LastIndex(s, "]") tag := strings.Trim(s[start:end], "[]") - tags := strings.Split(tag, ",") - for _, tag := range tags { + tags := strings.SplitSeq(tag, ",") + for tag := range tags { kv := strings.Split(tag, ":") if len(kv) < 2 { continue diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index 0f0786e4a..bc438c8a9 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -25,6 +25,7 @@ import ( "errors" "fmt" "io" + "maps" "math/rand" "strconv" "strings" @@ -162,13 +163,13 @@ func (o listPathOptions) newMetacache() metacache { } } -func (o *listPathOptions) debugf(format string, data ...interface{}) { +func (o *listPathOptions) debugf(format string, data ...any) { if serverDebugLog { console.Debugf(format+"\n", data...) } } -func (o *listPathOptions) debugln(data ...interface{}) { +func (o *listPathOptions) debugln(data ...any) { if serverDebugLog { console.Debugln(data...) 
} @@ -906,9 +907,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache fi := FileInfo{ Metadata: make(map[string]string, len(meta)), } - for k, v := range meta { - fi.Metadata[k] = v - } + maps.Copy(fi.Metadata, meta) err := er.updateObjectMetaWithOpts(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks(), UpdateMetadataOpts{NoPersistence: true}) if err == nil { break diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go index d7043a135..f56208212 100644 --- a/cmd/metrics-v2.go +++ b/cmd/metrics-v2.go @@ -20,6 +20,7 @@ package cmd import ( "context" "fmt" + "maps" "math" "net/http" "runtime" @@ -431,15 +432,9 @@ func (m *MetricV2) clone() MetricV2 { VariableLabels: make(map[string]string, len(m.VariableLabels)), Histogram: make(map[string]uint64, len(m.Histogram)), } - for k, v := range m.StaticLabels { - metric.StaticLabels[k] = v - } - for k, v := range m.VariableLabels { - metric.VariableLabels[k] = v - } - for k, v := range m.Histogram { - metric.Histogram[k] = v - } + maps.Copy(metric.StaticLabels, m.StaticLabels) + maps.Copy(metric.VariableLabels, m.VariableLabels) + maps.Copy(metric.Histogram, m.Histogram) return metric } @@ -2492,10 +2487,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { "endpoint": ep, }, } - dwntime := currDowntime - if health.offlineDuration > currDowntime { - dwntime = health.offlineDuration - } + dwntime := max(health.offlineDuration, currDowntime) downtimeDuration.Value = float64(dwntime / time.Second) ml = append(ml, downtimeDuration) } diff --git a/cmd/metrics-v3-handler.go b/cmd/metrics-v3-handler.go index 0c9a775a6..7033fe310 100644 --- a/cmd/metrics-v3-handler.go +++ b/cmd/metrics-v3-handler.go @@ -35,7 +35,7 @@ import ( type promLogger struct{} -func (p promLogger) Println(v ...interface{}) { +func (p promLogger) Println(v ...any) { metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v)) } diff --git a/cmd/namespace-lock_test.go b/cmd/namespace-lock_test.go index fae7bd377..3f44e5f9a 100644 --- a/cmd/namespace-lock_test.go +++ b/cmd/namespace-lock_test.go @@ -45,7 +45,7 @@ func TestNSLockRace(t *testing.T) { ctx := t.Context() - for i := 0; i < 10000; i++ { + for i := range 10000 { nsLk := newNSLock(false) // lk1; ref=1 diff --git a/cmd/net_test.go b/cmd/net_test.go index 94ed00214..94064a383 100644 --- a/cmd/net_test.go +++ b/cmd/net_test.go @@ -201,7 +201,6 @@ func TestCheckLocalServerAddr(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { err := CheckLocalServerAddr(testCase.serverAddr) switch { @@ -273,7 +272,6 @@ func TestSameLocalAddrs(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2) if testCase.expectedErr != nil && err == nil { diff --git a/cmd/notification.go b/cmd/notification.go index 36dc70e07..021d9d652 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -155,7 +155,6 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -170,7 +169,6 @@ func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) func (sys *NotificationSys) LoadPolicy(ctx 
context.Context, policyName string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -185,7 +183,6 @@ func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) [ func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -200,7 +197,6 @@ func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup s func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -215,7 +211,6 @@ func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) [] func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp bool) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -230,7 +225,6 @@ func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -245,7 +239,6 @@ func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []Notif func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -260,7 +253,6 @@ func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey func (sys *NotificationSys) LoadServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable @@ -276,7 +268,6 @@ func (sys *NotificationSys) BackgroundHealStatus(ctx context.Context) ([]madmin. 
ng := WithNPeers(len(sys.peerClients)) states := make([]madmin.BgHealState, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client ng.Go(ctx, func() error { if client == nil { @@ -485,7 +476,6 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe locksResp := make([]*PeerLocks, len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client g.Go(func() error { if client == nil { @@ -570,7 +560,6 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck ng := WithNPeers(len(sys.peerClients)).WithRetries(1) replicationStats := make([]BucketStatsMap, len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client ng.Go(ctx, func() error { if client == nil { @@ -612,7 +601,6 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam ng := WithNPeers(len(sys.peerClients)).WithRetries(1) bucketStats := make([]BucketStats, len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client ng.Go(ctx, func() error { if client == nil { @@ -647,7 +635,6 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri ng := WithNPeers(len(sys.peerClients)).WithRetries(1) siteStats := make([]SRMetricsSummary, len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client ng.Go(ctx, func() error { if client == nil { @@ -926,7 +913,6 @@ func (sys *NotificationSys) GetResourceMetrics(ctx context.Context) <-chan Metri g := errgroup.WithNErrs(len(sys.peerClients)) peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) for index := range sys.peerClients { - index := index g.Go(func() error { if sys.peerClients[index] == nil { return errPeerNotReachable @@ -1302,7 +1288,6 @@ func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan MetricV g := errgroup.WithNErrs(len(sys.peerClients)) peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) for index := range sys.peerClients { - index := index g.Go(func() error { if sys.peerClients[index] == nil { return errPeerNotReachable @@ -1323,7 +1308,6 @@ func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan Metric g := errgroup.WithNErrs(len(sys.peerClients)) peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) for index := range sys.peerClients { - index := index g.Go(func() error { if sys.peerClients[index] == nil { return errPeerNotReachable diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go index b6672deab..d00e2be19 100644 --- a/cmd/object-api-datatypes.go +++ b/cmd/object-api-datatypes.go @@ -19,6 +19,7 @@ package cmd import ( "io" + "maps" "math" "net/http" "time" @@ -290,9 +291,7 @@ func (o *ObjectInfo) Clone() (cinfo ObjectInfo) { VersionPurgeStatusInternal: o.VersionPurgeStatusInternal, } cinfo.UserDefined = make(map[string]string, len(o.UserDefined)) - for k, v := range o.UserDefined { - cinfo.UserDefined[k] = v - } + maps.Copy(cinfo.UserDefined, o.UserDefined) return cinfo } diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 00dd61328..2a9ca570a 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -156,7 +156,6 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te } for i, testCase := range testCases { - testCase := testCase 
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { var err error var resultL ListObjectsInfo @@ -944,7 +943,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys) result, err := obj.ListObjects(t.Context(), testCase.bucketName, @@ -1676,7 +1674,6 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { result, err := obj.ListObjectVersions(t.Context(), testCase.bucketName, testCase.prefix, testCase.marker, "", testCase.delimiter, int(testCase.maxKeys)) @@ -1827,7 +1824,6 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { var foundObjects []ObjectInfo var foundPrefixes []string @@ -1914,7 +1910,7 @@ func BenchmarkListObjects(b *testing.B) { } // Insert objects to be listed and benchmarked later. - for i := 0; i < 20000; i++ { + for i := range 20000 { key := "obj" + strconv.Itoa(i) _, err = obj.PutObject(b.Context(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{}) if err != nil { @@ -1922,10 +1918,8 @@ func BenchmarkListObjects(b *testing.B) { } } - b.ResetTimer() - // List the buckets over and over and over. - for i := 0; i < b.N; i++ { + for b.Loop() { _, err = obj.ListObjects(b.Context(), bucket, "", "obj9000", "", -1) if err != nil { b.Fatal(err) diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index dada0ae85..d888df717 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -369,7 +369,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) } - for i := 0; i < 3; i++ { + for range 3 { // Initiate Multipart Upload on bucketNames[1] for the same object 3 times. // Used to test the listing for the case of multiple uploadID's for a given object. res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts) @@ -392,7 +392,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan } // Initiate Multipart Upload on bucketNames[2]. // Used to test the listing for the case of multiple objects for a given bucket. - for i := 0; i < 6; i++ { + for i := range 6 { res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts) if err != nil { // Failed to create NewMultipartUpload, abort. 
@@ -2167,7 +2167,6 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T } for _, testCase := range testCases { - testCase := testCase t.(*testing.T).Run("", func(t *testing.T) { opts = ObjectOptions{} actualResult, actualErr := obj.CompleteMultipartUpload(t.Context(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{}) diff --git a/cmd/object-api-options.go b/cmd/object-api-options.go index d5ae085e3..1e0c10a75 100644 --- a/cmd/object-api-options.go +++ b/cmd/object-api-options.go @@ -226,7 +226,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) { attributes = make(map[string]struct{}) for _, headerVal := range h.Values(xhttp.AmzObjectAttributes) { - for _, v := range strings.Split(strings.TrimSpace(headerVal), ",") { + for v := range strings.SplitSeq(strings.TrimSpace(headerVal), ",") { if v != "" { attributes[v] = struct{}{} } diff --git a/cmd/object-api-utils_test.go b/cmd/object-api-utils_test.go index 933a18ccb..53992bf86 100644 --- a/cmd/object-api-utils_test.go +++ b/cmd/object-api-utils_test.go @@ -61,14 +61,14 @@ func benchmark(b *testing.B, data []string) { b.Run("concat naive", func(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { concatNaive(data...) } }) b.Run("concat fast", func(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { concat(data...) } }) @@ -77,7 +77,7 @@ func benchmark(b *testing.B, data []string) { func BenchmarkConcatImplementation(b *testing.B) { data := make([]string, 2) rng := rand.New(rand.NewSource(0)) - for i := 0; i < 2; i++ { + for i := range 2 { var tmp [16]byte rng.Read(tmp[:]) data[i] = hex.EncodeToString(tmp[:]) @@ -91,7 +91,7 @@ func BenchmarkPathJoinOld(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { pathJoinOld("volume", "path/path/path") } }) @@ -102,7 +102,7 @@ func BenchmarkPathJoin(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { pathJoin("volume", "path/path/path") } }) diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 418436bd9..7879300be 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -25,6 +25,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/http/httptest" "net/textproto" @@ -1642,15 +1643,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano) } // Store the preserved compression metadata. - for k, v := range compressMetadata { - srcInfo.UserDefined[k] = v - } + maps.Copy(srcInfo.UserDefined, compressMetadata) // We need to preserve the encryption headers set in EncryptRequest, // so we do not want to override them, copy them instead. 
- for k, v := range encMetadata { - srcInfo.UserDefined[k] = v - } + maps.Copy(srcInfo.UserDefined, encMetadata) // Ensure that metadata does not contain sensitive information crypto.RemoveSensitiveEntries(srcInfo.UserDefined) @@ -2408,8 +2405,8 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if k == "minio.versionId" { continue } - if strings.HasPrefix(k, "minio.metadata.") { - k = strings.TrimPrefix(k, "minio.metadata.") + if after, ok0 := strings.CutPrefix(k, "minio.metadata."); ok0 { + k = after hdrs.Set(k, v) } } @@ -2417,9 +2414,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if err != nil { return err } - for k, v := range m { - metadata[k] = v - } + maps.Copy(metadata, m) } else { versionID = r.Form.Get(xhttp.VersionID) hdrs = r.Header diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index a0577e0f7..8da26b3dd 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -29,6 +29,7 @@ import ( "hash" "hash/crc32" "io" + "maps" "net/http" "net/http/httptest" "net/url" @@ -228,9 +229,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke } mapCopy = func(m map[string]string) map[string]string { r := make(map[string]string, len(m)) - for k, v := range m { - r[k] = v - } + maps.Copy(r, m) return r } ) @@ -662,9 +661,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str } mapCopy = func(m map[string]string) map[string]string { r := make(map[string]string, len(m)) - for k, v := range m { - r[k] = v - } + maps.Copy(r, m) return r } ) @@ -860,9 +857,7 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket } mapCopy = func(m map[string]string) map[string]string { r := make(map[string]string, len(m)) - for k, v := range m { - r[k] = v - } + maps.Copy(r, m) return r } ) @@ -2819,7 +2814,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam objectName := "test-object-new-multipart-parallel" var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) // Initiate NewMultipart upload on the same object 10 times concurrently. go func() { @@ -2883,7 +2878,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s // upload IDs collected. var uploadIDs []string - for i := 0; i < 2; i++ { + for range 2 { // initiate new multipart uploadID. res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts) if err != nil { @@ -3251,7 +3246,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri // upload IDs collected. var uploadIDs []string - for i := 0; i < 2; i++ { + for range 2 { // initiate new multipart uploadID. res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts) if err != nil { diff --git a/cmd/object-multipart-handlers.go b/cmd/object-multipart-handlers.go index 2226e407c..c7edaf76f 100644 --- a/cmd/object-multipart-handlers.go +++ b/cmd/object-multipart-handlers.go @@ -22,6 +22,7 @@ import ( "context" "fmt" "io" + "maps" "net/http" "net/url" "sort" @@ -183,9 +184,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r // We need to preserve the encryption headers set in EncryptRequest, // so we do not want to override them, copy them instead. 
- for k, v := range encMetadata { - metadata[k] = v - } + maps.Copy(metadata, encMetadata) // Ensure that metadata does not contain sensitive information crypto.RemoveSensitiveEntries(metadata) @@ -525,7 +524,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt copy(objectEncryptionKey[:], key) var nonce [12]byte - tmp := sha256.Sum256([]byte(fmt.Sprint(uploadID, partID))) + tmp := sha256.Sum256(fmt.Append(nil, uploadID, partID)) copy(nonce[:], tmp[:12]) partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID)) @@ -823,7 +822,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http } var nonce [12]byte - tmp := sha256.Sum256([]byte(fmt.Sprint(uploadID, partID))) + tmp := sha256.Sum256(fmt.Append(nil, uploadID, partID)) copy(nonce[:], tmp[:12]) reader, err = sio.EncryptReader(in, sio.Config{ diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index 3fed8c437..3377ad208 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -191,7 +191,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH if err != nil { t.Fatalf("%s: %s", instanceType, err) } - for i := 0; i < 10; i++ { + for i := range 10 { randomPerm := rand.Perm(100) randomString := "" for _, num := range randomPerm { @@ -256,7 +256,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." var opts ObjectOptions // check before paging occurs. - for i := 0; i < 5; i++ { + for i := range 5 { key := "obj" + strconv.Itoa(i) _, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts) if err != nil { @@ -439,7 +439,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { // check paging works. 
ag := []string{"a", "b", "c", "d", "e", "f", "g"} checkObjCount := make(map[string]int) - for i := 0; i < 7; i++ { + for i := range 7 { dirName := strings.Repeat(ag[i], 3) key := fmt.Sprintf("testPrefix/%s/obj%s", dirName, dirName) checkObjCount[key]++ diff --git a/cmd/os-instrumented.go b/cmd/os-instrumented.go index d9b92d4a4..9a369f866 100644 --- a/cmd/os-instrumented.go +++ b/cmd/os-instrumented.go @@ -213,7 +213,7 @@ func (o *osMetrics) report() madmin.OSMetrics { var m madmin.OSMetrics m.CollectedAt = time.Now() m.LifeTimeOps = make(map[string]uint64, osMetricLast) - for i := osMetric(0); i < osMetricLast; i++ { + for i := range osMetricLast { if n := atomic.LoadUint64(&o.operations[i]); n > 0 { m.LifeTimeOps[i.String()] = n } @@ -223,7 +223,7 @@ func (o *osMetrics) report() madmin.OSMetrics { } m.LastMinute.Operations = make(map[string]madmin.TimedAction, osMetricLast) - for i := osMetric(0); i < osMetricLast; i++ { + for i := range osMetricLast { lm := o.latency[i].total() if lm.N > 0 { m.LastMinute.Operations[i.String()] = lm.asTimedAction() diff --git a/cmd/peer-s3-client.go b/cmd/peer-s3-client.go index feb0da705..438b4d336 100644 --- a/cmd/peer-s3-client.go +++ b/cmd/peer-s3-client.go @@ -113,7 +113,6 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin g := errgroup.WithNErrs(len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -148,7 +147,6 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin g = errgroup.WithNErrs(len(sys.peerClients)) healBucketResults := make([]madmin.HealResultItem, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -207,7 +205,6 @@ func (sys *S3PeerSys) ListBuckets(ctx context.Context, opts BucketOptions) ([]Bu nodeBuckets := make([][]BucketInfo, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -295,7 +292,6 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc bucketInfos := make([]BucketInfo, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -401,7 +397,6 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri func (sys *S3PeerSys) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error { g := errgroup.WithNErrs(len(sys.peerClients)) for idx, client := range sys.peerClients { - client := client g.Go(func() error { if client == nil { return errPeerOffline @@ -448,7 +443,6 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string, func (sys *S3PeerSys) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error { g := errgroup.WithNErrs(len(sys.peerClients)) for idx, client := range sys.peerClients { - client := client g.Go(func() error { if client == nil { return errPeerOffline diff --git a/cmd/peer-s3-server.go b/cmd/peer-s3-server.go index abbbbed91..227aebe8d 100644 --- a/cmd/peer-s3-server.go +++ b/cmd/peer-s3-server.go @@ -47,7 +47,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( // Make a volume entry on all underlying storage disks. 
for index := range localDrives { - index := index g.Go(func() (serr error) { if localDrives[index] == nil { beforeState[index] = madmin.DriveStateOffline @@ -111,7 +110,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( if !isMinioMetaBucketName(bucket) && !isAllBucketsNotFound(errs) && opts.Remove { g := errgroup.WithNErrs(len(localDrives)) for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound @@ -131,7 +129,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if beforeState[index] == madmin.DriveStateMissing { err := localDrives[index].MakeVol(ctx, bucket) @@ -225,7 +222,6 @@ func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound @@ -273,7 +269,6 @@ func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOpti // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound @@ -294,7 +289,6 @@ func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions) // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound diff --git a/cmd/perf-tests.go b/cmd/perf-tests.go index 33b01722d..3422b54f3 100644 --- a/cmd/perf-tests.go +++ b/cmd/perf-tests.go @@ -375,7 +375,7 @@ func siteNetperf(ctx context.Context, duration time.Duration) madmin.SiteNetPerf } info := info wg.Add(connectionsPerPeer) - for i := 0; i < connectionsPerPeer; i++ { + for range connectionsPerPeer { go func() { defer wg.Done() ctx, cancel := context.WithTimeout(ctx, duration+10*time.Second) diff --git a/cmd/post-policy-fan-out.go b/cmd/post-policy-fan-out.go index 94c9b9204..23a118986 100644 --- a/cmd/post-policy-fan-out.go +++ b/cmd/post-policy-fan-out.go @@ -20,6 +20,7 @@ package cmd import ( "bytes" "context" + "maps" "sync" "github.com/minio/minio-go/v7" @@ -78,9 +79,7 @@ func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, }() userDefined := make(map[string]string, len(req.UserMetadata)) - for k, v := range req.UserMetadata { - userDefined[k] = v - } + maps.Copy(userDefined, req.UserMetadata) tgs, err := tags.NewTags(req.UserTags, true) if err != nil { diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 236e6dd10..9a02ca846 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -23,6 +23,7 @@ import ( "encoding/base64" "fmt" "io" + "maps" "mime/multipart" "net/http" "net/http/httptest" @@ -321,7 +322,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus int accessKey string secretKey string - dates []interface{} + dates []any policy string noFilename bool corruptedBase64 bool @@ -334,7 +335,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusNoContent, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), 
curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"],["eq", "$content-encoding", "gzip"]]}`, }, // Success case, no multipart filename. @@ -344,7 +345,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusNoContent, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"],["eq", "$content-encoding", "gzip"]]}`, noFilename: true, }, @@ -355,7 +356,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusNoContent, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"],["eq", "$content-encoding", "gzip"]]}`, }, // Corrupted Base 64 result @@ -365,7 +366,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusBadRequest, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, corruptedBase64: true, }, @@ -376,7 +377,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusBadRequest, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + 
`"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, corruptedMultipart: true, }, @@ -388,7 +389,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusForbidden, accessKey: "", secretKey: "", - dates: []interface{}{}, + dates: []any{}, policy: ``, }, // Expired document @@ -398,7 +399,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusForbidden, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTime.Add(-1 * time.Minute * 5).Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTime.Add(-1 * time.Minute * 5).Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, }, // Corrupted policy document @@ -408,7 +409,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusForbidden, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"3/aws4_request"]]}`, }, } @@ -550,7 +551,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. rec := httptest.NewRecorder() - dates := []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)} + dates := []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)} policy := `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], {"success_action_redirect":"` + redirectURL.String() + `"},["starts-with", "$key", "test/"], ["eq", "$x-amz-meta-uuid", "1234"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$content-encoding", "gzip"]]}` // Generate the final policy document @@ -620,9 +621,7 @@ func newPostRequestV2(endPoint, bucketName, objectName string, secretKey string, "signature": signature, } - for key, value := range formInputData { - formData[key] = value - } + maps.Copy(formData, formInputData) // Create the multipart form. var buf bytes.Buffer @@ -705,9 +704,7 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData [] } // Add form data - for k, v := range addFormData { - formData[k] = v - } + maps.Copy(formData, addFormData) // Create the multipart form. var buf bytes.Buffer diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index 9119c1e7a..e9b616b76 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -61,7 +61,7 @@ const ( ) // toString - Safely convert interface to string without causing panic. 
-func toString(val interface{}) string { +func toString(val any) string { switch v := val.(type) { case string: return v @@ -71,12 +71,12 @@ } // toLowerString - safely convert interface to lower string -func toLowerString(val interface{}) string { +func toLowerString(val any) string { return strings.ToLower(toString(val)) } // toInteger - Safely convert interface to integer without causing panic. -func toInteger(val interface{}) (int64, error) { +func toInteger(val any) (int64, error) { switch v := val.(type) { case float64: return int64(v), nil @@ -93,7 +93,7 @@ } // isString - Safely check if val is of type string without causing panic. -func isString(val interface{}) bool { +func isString(val any) bool { _, ok := val.(string) return ok } @@ -161,8 +161,8 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { // Convert po into interfaces and // perform strict type conversion using reflection. var rawPolicy struct { - Expiration string `json:"expiration"` - Conditions []interface{} `json:"conditions"` + Expiration string `json:"expiration"` + Conditions []any `json:"conditions"` } d.DisallowUnknownFields() @@ -181,7 +181,7 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { // Parse conditions. for _, val := range rawPolicy.Conditions { switch condt := val.(type) { - case map[string]interface{}: // Handle key:value map types. + case map[string]any: // Handle key:value map types. for k, v := range condt { if !isString(v) { // Pre-check value type. // All values must be of type string. @@ -197,7 +197,7 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { policyCondEqual, "$" + strings.ToLower(k), toString(v), }) } - case []interface{}: // Handle array types. + case []any: // Handle array types. if len(condt) != 3 { // Return error if we have insufficient elements. return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String()) } diff --git a/cmd/postpolicyform_test.go b/cmd/postpolicyform_test.go index 0f86044b8..721718bca 100644 --- a/cmd/postpolicyform_test.go +++ b/cmd/postpolicyform_test.go @@ -65,7 +65,6 @@ func TestParsePostPolicyForm(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { _, err := parsePostPolicyForm(strings.NewReader(testCase.policy)) if testCase.success && err != nil { diff --git a/cmd/rebalance-admin.go b/cmd/rebalance-admin.go index 3cd831d15..4fa372ceb 100644 --- a/cmd/rebalance-admin.go +++ b/cmd/rebalance-admin.go @@ -33,17 +33,17 @@ type rebalPoolProgress struct { } type rebalancePoolStatus struct { - ID int `json:"id"` // Pool index (zero-based) - Status string `json:"status"` // Active if rebalance is running, empty otherwise - Used float64 `json:"used"` // Percentage used space - Progress rebalPoolProgress `json:"progress,omitempty"` // is empty when rebalance is not running + ID int `json:"id"` // Pool index (zero-based) - Status string `json:"status"` // Active if rebalance is running, empty otherwise + Used float64 `json:"used"` // Percentage used space + Progress rebalPoolProgress `json:"progress"` // is empty when rebalance is not running } // rebalanceAdminStatus holds rebalance status related information exported to mc, console, etc. 
type rebalanceAdminStatus struct { ID string // identifies the ongoing rebalance operation by a uuid Pools []rebalancePoolStatus `json:"pools"` // contains all pools, including inactive - StoppedAt time.Time `json:"stoppedAt,omitempty"` + StoppedAt time.Time `json:"stoppedAt"` } func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdminStatus, err error) { diff --git a/cmd/s3-zip-handlers.go b/cmd/s3-zip-handlers.go index b11106f45..e2c91226c 100644 --- a/cmd/s3-zip-handlers.go +++ b/cmd/s3-zip-handlers.go @@ -180,10 +180,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, if file.UncompressedSize64 > 0 { // There may be number of header bytes before the content. // Reading 64K extra. This should more than cover name and any "extra" details. - end := file.Offset + int64(file.CompressedSize64) + 64<<10 - if end > zipObjInfo.Size { - end = zipObjInfo.Size - } + end := min(file.Offset+int64(file.CompressedSize64)+64<<10, zipObjInfo.Size) rs := &HTTPRangeSpec{Start: file.Offset, End: end} gr, err := objectAPI.GetObjectNInfo(ctx, bucket, zipPath, rs, nil, opts) if err != nil { diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index f80f8ace8..fd397b9f4 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -52,7 +52,6 @@ func TestServerConfigFile(t *testing.T) { expectedErr: true, }, } { - testcase := testcase t.Run(testcase.config, func(t *testing.T) { sctx := &serverCtxt{} err := mergeServerCtxtFromConfigFile(testcase.config, sctx) diff --git a/cmd/server_test.go b/cmd/server_test.go index ec49d89bd..e69117351 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -60,7 +60,7 @@ type check struct { } // Assert - checks if gotValue is same as expectedValue, if not fails the test. -func (c *check) Assert(gotValue interface{}, expectedValue interface{}) { +func (c *check) Assert(gotValue any, expectedValue any) { c.Helper() if !reflect.DeepEqual(gotValue, expectedValue) { c.Fatalf("Test %s expected %v, got %v", c.testType, expectedValue, gotValue) @@ -653,7 +653,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { delObjReq := DeleteObjectsRequest{ Quiet: false, } - for i := 0; i < 10; i++ { + for i := range 10 { // Obtain http request to upload object. // object Name contains a prefix. 
objName := fmt.Sprintf("%d/%s", i, objectName) @@ -690,7 +690,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) err = xml.Unmarshal(delRespBytes, &deleteResp) c.Assert(err, nil) - for i := 0; i < 10; i++ { + for i := range 10 { // All the objects should be under deleted list (including non-existent object) c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ ObjectName: delObjReq.Objects[i].ObjectName, @@ -714,7 +714,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { err = xml.Unmarshal(delRespBytes, &deleteResp) c.Assert(err, nil) c.Assert(len(deleteResp.DeletedObjects), len(delObjReq.Objects)) - for i := 0; i < 10; i++ { + for i := range 10 { c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ ObjectName: delObjReq.Objects[i].ObjectName, VersionID: delObjReq.Objects[i].VersionID, @@ -1054,7 +1054,7 @@ func (s *TestSuiteCommon) TestPutBucket(c *check) { // The purpose this block is not to check for correctness of functionality // Run the test with -race flag to utilize this var wg sync.WaitGroup - for i := 0; i < testConcurrencyLevel; i++ { + for range testConcurrencyLevel { wg.Add(1) go func() { defer wg.Done() @@ -2127,7 +2127,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge10MiB(c *check) { 1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890, 1234567890,1234567890,1234567890,1234567890,1234567890,123"` // Create 10MiB content where each line contains 1024 characters. - for i := 0; i < 10*1024; i++ { + for i := range 10 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } putContent := buffer.String() @@ -2189,7 +2189,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *check) { 1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890, 1234567890,1234567890,1234567890,123` // Create 11MiB content where each line contains 1024 characters. - for i := 0; i < 11*1024; i++ { + for i := range 11 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } putMD5 := getMD5Hash(buffer.Bytes()) @@ -2340,7 +2340,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge11MiB(c *check) { 1234567890,1234567890,1234567890,123` // Create 11MiB content where each line contains 1024 // characters. - for i := 0; i < 11*1024; i++ { + for i := range 11 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } putContent := buffer.String() @@ -2406,7 +2406,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge10MiB(c *check) { 1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890, 1234567890,1234567890,1234567890,123` // Create 10MiB content where each line contains 1024 characters. - for i := 0; i < 10*1024; i++ { + for i := range 10 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } diff --git a/cmd/sftp-server.go b/cmd/sftp-server.go index 06bc72dd9..e286f18ec 100644 --- a/cmd/sftp-server.go +++ b/cmd/sftp-server.go @@ -238,7 +238,7 @@ func processLDAPAuthentication(key ssh.PublicKey, pass []byte, user string) (per return nil, errSFTPUserHasNoPolicies } - claims := make(map[string]interface{}) + claims := make(map[string]any) for attribKey, attribValue := range lookupResult.Attributes { // we skip multi-value attributes here, as they cannot // be stored in the critical options. 
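Reviewer note (illustrative, not part of the patch): the hunks in this diff repeat a small set of mechanical Go modernizations: manual map-copy loops become maps.Copy (Go 1.21), counted loops become range-over-int (Go 1.22), if-based clamping becomes the min/max builtins (Go 1.21), []byte(fmt.Sprintf(...)) becomes fmt.Appendf (Go 1.19), and HasPrefix+TrimPrefix pairs become strings.CutPrefix (Go 1.20). A minimal, self-contained sketch of these idioms, with hypothetical values and assuming a Go 1.22+ toolchain:

package main

import (
	"fmt"
	"maps"
	"strings"
)

func main() {
	// maps.Copy (Go 1.21) replaces `for k, v := range src { dst[k] = v }`.
	dst := map[string]string{"a": "1"}
	maps.Copy(dst, map[string]string{"b": "2"})

	// Range over an int (Go 1.22) replaces `for i := 0; i < n; i++`;
	// the variable is dropped entirely when the index is unused.
	for i := range 3 {
		_ = i
	}

	// The min/max builtins (Go 1.21) replace if-based clamping, as in
	// the downtime and zip range-end computations in this patch.
	end := min(42+64<<10, 1<<20)

	// fmt.Appendf (Go 1.19) formats directly into a byte slice,
	// avoiding the extra []byte(fmt.Sprintf(...)) conversion.
	buf := fmt.Appendf(nil, "end=%d", end)

	// strings.CutPrefix (Go 1.20) fuses the HasPrefix/TrimPrefix pair.
	if after, ok := strings.CutPrefix("minio.metadata.key", "minio.metadata."); ok {
		fmt.Println(after, len(buf))
	}
}

All of these rewrites are behavior-preserving; they are the same family of transformations suggested by the gopls `modernize` analyzer, which was likely (an assumption) the tool used to generate this patch.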
diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index bc88bab20..1fd42ba70 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -95,7 +95,7 @@ func doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, APIEr // Escape encodedQuery string into unescaped list of query params, returns error // if any while unescaping the values. func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { - for _, query := range strings.Split(encodedQuery, "&") { + for query := range strings.SplitSeq(encodedQuery, "&") { var unescapedQuery string unescapedQuery, err = url.QueryUnescape(query) if err != nil { diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 7ebaf61e9..b2c2b0127 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -32,7 +32,7 @@ func TestResourceListSorting(t *testing.T) { sortedResourceList := make([]string, len(resourceList)) copy(sortedResourceList, resourceList) sort.Strings(sortedResourceList) - for i := 0; i < len(resourceList); i++ { + for i := range resourceList { if resourceList[i] != sortedResourceList[i] { t.Errorf("Expected resourceList[%d] = \"%s\", resourceList is not correctly sorted.", i, sortedResourceList[i]) break diff --git a/cmd/site-replication-metrics.go b/cmd/site-replication-metrics.go index 4dd3b8d3b..bb3715313 100644 --- a/cmd/site-replication-metrics.go +++ b/cmd/site-replication-metrics.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" + "maps" "sync" "sync/atomic" "time" @@ -54,9 +55,7 @@ func (rt *RTimedMetrics) toMetric() madmin.TimedErrStats { return madmin.TimedErrStats{} } errCounts := make(map[string]int) - for k, v := range rt.ErrCounts { - errCounts[k] = v - } + maps.Copy(errCounts, rt.ErrCounts) minuteTotals := rt.LastMinute.getTotal() hourTotals := rt.LastHour.getTotal() return madmin.TimedErrStats{ @@ -99,9 +98,7 @@ func (rt *RTimedMetrics) merge(o RTimedMetrics) (n RTimedMetrics) { n.LastHour = n.LastHour.merge(rt.LastHour) n.LastHour = n.LastHour.merge(o.LastHour) n.ErrCounts = make(map[string]int) - for k, v := range rt.ErrCounts { - n.ErrCounts[k] = v - } + maps.Copy(n.ErrCounts, rt.ErrCounts) for k, v := range o.ErrCounts { n.ErrCounts[k] += v } @@ -264,7 +261,7 @@ type SRMetric struct { ReplicatedCount int64 `json:"replicatedCount"` // Failed captures replication errors in various time windows - Failed madmin.TimedErrStats `json:"failed,omitempty"` + Failed madmin.TimedErrStats `json:"failed"` XferStats map[RMetricName]XferStats `json:"transferSummary"` } diff --git a/cmd/site-replication-utils.go b/cmd/site-replication-utils.go index b6b41ca5f..192275845 100644 --- a/cmd/site-replication-utils.go +++ b/cmd/site-replication-utils.go @@ -19,6 +19,7 @@ package cmd import ( "context" + "maps" "math/rand" "sync" "time" @@ -45,9 +46,7 @@ func (s *SiteResyncStatus) clone() SiteResyncStatus { } o := *s o.BucketStatuses = make(map[string]ResyncStatusType, len(s.BucketStatuses)) - for b, st := range s.BucketStatuses { - o.BucketStatuses[b] = st - } + maps.Copy(o.BucketStatuses, s.BucketStatuses) return o } @@ -88,11 +87,9 @@ func (sm *siteResyncMetrics) init(ctx context.Context) { <-ctx.Done() return } - duration := time.Duration(r.Float64() * float64(time.Second*10)) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(time.Second*10)), // Make sure to sleep at least a second to avoid high CPU ticks. 
- duration = time.Second - } + time.Second) time.Sleep(duration) } } diff --git a/cmd/site-replication.go b/cmd/site-replication.go index be4dd334f..8dc12a6b0 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -26,10 +26,12 @@ import ( "encoding/xml" "errors" "fmt" + "maps" "math/rand" "net/url" "reflect" "runtime" + "slices" "sort" "strings" "sync" @@ -240,11 +242,9 @@ func (c *SiteReplicationSys) Init(ctx context.Context, objAPI ObjectLayer) error } replLogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init") - duration := time.Duration(r.Float64() * float64(time.Minute)) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(time.Minute)), // Make sure to sleep at least a second to avoid high CPU ticks. - duration = time.Second - } + time.Second) time.Sleep(duration) } c.RLock() @@ -720,7 +720,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration var wg sync.WaitGroup var resultsMu sync.RWMutex for _, info := range infos.Sites { - info := info // will call siteNetperf, means call others's adminAPISiteReplicationDevNull if globalDeploymentID() == info.DeploymentID { wg.Add(1) @@ -2831,9 +2830,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O info.Enabled = true info.Sites = make(map[string]madmin.PeerInfo, len(c.state.Peers)) - for d, peer := range c.state.Peers { - info.Sites[d] = peer - } + maps.Copy(info.Sites, c.state.Peers) info.UpdatedAt = c.state.UpdatedAt var maxBuckets int @@ -3816,9 +3813,7 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI info.ILMExpiryRules[opts.EntityValue] = rule } } else { - for id, rule := range allRules { - info.ILMExpiryRules[id] = rule - } + maps.Copy(info.ILMExpiryRules, allRules) } } if opts.PeerState { @@ -3956,9 +3951,7 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI return info, errSRBackendIssue(errG) } } - for group, d := range groupDescMap { - info.GroupDescMap[group] = d - } + maps.Copy(info.GroupDescMap, groupDescMap) } } // cache SR metadata info for IAM @@ -5692,11 +5685,8 @@ func isGroupDescEqual(g1, g2 madmin.GroupDesc) bool { } for _, v1 := range g1.Members { var found bool - for _, v2 := range g2.Members { - if v1 == v2 { - found = true - break - } + if slices.Contains(g2.Members, v1) { + found = true } if !found { return false @@ -5716,11 +5706,8 @@ func isUserInfoEqual(u1, u2 madmin.UserInfo) bool { } for _, v1 := range u1.MemberOf { var found bool - for _, v2 := range u2.MemberOf { - if v1 == v2 { - found = true - break - } + if slices.Contains(u2.MemberOf, v1) { + found = true } if !found { return false diff --git a/cmd/storage-datatypes_test.go b/cmd/storage-datatypes_test.go index 91c3547c0..60470a221 100644 --- a/cmd/storage-datatypes_test.go +++ b/cmd/storage-datatypes_test.go @@ -39,8 +39,8 @@ func BenchmarkDecodeVolInfoMsgp(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := v.DecodeMsg(dc) if err != nil { b.Fatal(err) @@ -68,8 +68,8 @@ func BenchmarkDecodeDiskInfoMsgp(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := v.DecodeMsg(dc) if err != nil { b.Fatal(err) @@ -97,8 +97,8 @@ func BenchmarkDecodeDiskInfoGOB(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") 
b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { dec := gob.NewDecoder(bytes.NewBuffer(encoded)) err := dec.Decode(&v) if err != nil { @@ -123,8 +123,8 @@ func BenchmarkEncodeDiskInfoMsgp(b *testing.B) { b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := msgp.Encode(io.Discard, &v) if err != nil { b.Fatal(err) @@ -149,8 +149,8 @@ func BenchmarkEncodeDiskInfoGOB(b *testing.B) { enc := gob.NewEncoder(io.Discard) b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := enc.Encode(&v) if err != nil { b.Fatal(err) @@ -167,8 +167,8 @@ func BenchmarkDecodeFileInfoMsgp(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := v.DecodeMsg(dc) if err != nil { b.Fatal(err) @@ -184,8 +184,8 @@ func BenchmarkDecodeFileInfoGOB(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { dec := gob.NewDecoder(bytes.NewBuffer(encoded)) err := dec.Decode(&v) if err != nil { @@ -198,8 +198,8 @@ func BenchmarkEncodeFileInfoMsgp(b *testing.B) { v := FileInfo{Volume: "testbucket", Name: "src/compress/zlib/reader_test.go", VersionID: "", IsLatest: true, Deleted: false, DataDir: "5e0153cc-621a-4267-8cb6-4919140d53b3", XLV1: false, ModTime: UTCNow(), Size: 3430, Mode: 0x0, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption-Iv": "jIJPsrkkVYYMvc7edBrNl+7zcM7+ZwXqMb/YAjBO/ck=", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "my-minio-key", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAP2p7ZLv3UpLwBnsKkF2mtWba0qoY42tymK0szRgGvAxBNcXyHXYooe9dQpeeEJWgKUa/8R61oCy1mFwIg==", "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfAPFYRDkHVirJBJxBixNj3PLWt78dFuUTyTLIdLG820J7XqLPBO4gpEEEWw/DoTsJIb+apnaem+rKtQ1h3Q==", "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256", "content-type": "application/octet-stream", "etag": "20000f00e2c3709dc94905c6ce31e1cadbd1c064e14acdcd44cf0ac2db777eeedd88d639fcd64de16851ade8b21a9a1a"}, Parts: []ObjectPartInfo{{ETag: "", Number: 1, Size: 3430, ActualSize: 3398}}, Erasure: ErasureInfo{Algorithm: "reedsolomon", DataBlocks: 2, ParityBlocks: 2, BlockSize: 10485760, Index: 3, Distribution: []int{3, 4, 1, 2}, Checksums: []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}}}} b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := msgp.Encode(io.Discard, &v) if err != nil { b.Fatal(err) @@ -212,8 +212,8 @@ func BenchmarkEncodeFileInfoGOB(b *testing.B) { enc := gob.NewEncoder(io.Discard) b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := enc.Encode(&v) if err != nil { b.Fatal(err) diff --git a/cmd/streaming-signature-v4_test.go b/cmd/streaming-signature-v4_test.go index 89a729f55..8ea1301cc 100644 --- a/cmd/streaming-signature-v4_test.go +++ b/cmd/streaming-signature-v4_test.go @@ -41,7 +41,7 @@ func TestReadChunkLine(t *testing.T) { // Test - 2 bytes.NewReader([]byte("1000;")), // Test - 3 - bytes.NewReader([]byte(fmt.Sprintf("%4097d", 1))), + bytes.NewReader(fmt.Appendf(nil, "%4097d", 1)), // Test - 4 bytes.NewReader([]byte("1000;chunk-signature=111123333333333333334444211\r\n")), } diff --git a/cmd/sts-handlers.go b/cmd/sts-handlers.go index 33873b894..c4092bca3 100644 --- 
a/cmd/sts-handlers.go +++ b/cmd/sts-handlers.go @@ -93,7 +93,7 @@ const ( maxSTSSessionPolicySize = 2048 ) -type stsClaims map[string]interface{} +type stsClaims map[string]any func (c stsClaims) populateSessionPolicy(form url.Values) error { if len(form) == 0 { @@ -791,7 +791,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRoleWithCertificate") - claims := make(map[string]interface{}) + claims := make(map[string]any) defer logger.AuditLog(ctx, w, r, claims) if !globalIAMSys.Initialized() { @@ -977,7 +977,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRoleWithCustomToken") - claims := make(map[string]interface{}) + claims := make(map[string]any) auditLogFilterKeys := []string{stsToken} defer logger.AuditLog(ctx, w, r, claims, auditLogFilterKeys...) diff --git a/cmd/sts-handlers_test.go b/cmd/sts-handlers_test.go index 5dd0ada8c..d93443145 100644 --- a/cmd/sts-handlers_test.go +++ b/cmd/sts-handlers_test.go @@ -190,7 +190,7 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -222,7 +222,7 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) { } ] } -`, bucket, bucket)) +`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { @@ -289,7 +289,7 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -327,7 +327,7 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) { } } ] -}`, bucket, bucket, bucket, bucket)) +}`, bucket, bucket, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -403,7 +403,7 @@ func (s *TestSuiteIAM) TestSTS(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -418,7 +418,7 @@ func (s *TestSuiteIAM) TestSTS(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -488,7 +488,7 @@ func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -503,7 +503,7 @@ func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -671,7 +671,7 @@ func (s *TestSuiteIAM) TestSTSTokenRevoke(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -686,7 +686,7 @@ func (s *TestSuiteIAM) TestSTSTokenRevoke(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = 
s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1303,7 +1303,7 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1318,7 +1318,7 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1449,7 +1449,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1464,7 +1464,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1601,7 +1601,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1616,7 +1616,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1768,7 +1768,7 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1783,7 +1783,7 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1970,7 +1970,7 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1985,7 +1985,7 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -2296,7 +2296,7 @@ func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) { if dnClaim != testCase.dn { c.Fatalf("Test %d: unexpected dn claim: %s", i+1, dnClaim) } - sshPublicKeyClaim := claims.MapClaims[ldapAttribPrefix+"sshPublicKey"].([]interface{})[0].(string) + sshPublicKeyClaim := claims.MapClaims[ldapAttribPrefix+"sshPublicKey"].([]any)[0].(string) if sshPublicKeyClaim == "" { c.Fatalf("Test %d: expected sshPublicKey claim to be present", i+1) } @@ -2421,7 +2421,7 @@ func (s *TestSuiteIAM) TestOpenIDSTS(c *check) { // Create policy - with name as one of the groups in OpenID the user is // a member of. 
policy := "projecta" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -2436,7 +2436,7 @@ func (s *TestSuiteIAM) TestOpenIDSTS(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -2526,7 +2526,7 @@ func (s *TestSuiteIAM) TestOpenIDSTSDurationSeconds(c *check) { {60, true}, {1800, false}, } { - policyBytes := []byte(fmt.Sprintf(policyTmpl, testCase.durSecs, bucket)) + policyBytes := fmt.Appendf(nil, policyTmpl, testCase.durSecs, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("Test %d: policy add error: %v", i+1, err) @@ -2586,7 +2586,7 @@ func (s *TestSuiteIAM) TestOpenIDSTSAddUser(c *check) { // Create policy - with name as one of the groups in OpenID the user is // a member of. policy := "projecta" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -2601,7 +2601,7 @@ func (s *TestSuiteIAM) TestOpenIDSTSAddUser(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -2676,7 +2676,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAcc(c *check) { // Create policy - with name as one of the groups in OpenID the user is // a member of. policy := "projecta" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -2691,7 +2691,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAcc(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -3452,7 +3452,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAccWithRolePolicyUnderAMP(c *check) { svcAK, svcSK := mustGenerateCredentials(c) // This policy does not allow listing objects. 
- policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -3466,7 +3466,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAccWithRolePolicyUnderAMP(c *check) { ] } ] -}`, bucket)) +}`, bucket) cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ Policy: policyBytes, TargetUser: value.AccessKeyID, diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index ffe773b61..0f903625c 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -302,7 +302,7 @@ func nextSuffix() string { } // isSameType - compares two object types via reflect.TypeOf -func isSameType(obj1, obj2 interface{}) bool { +func isSameType(obj1, obj2 any) bool { return reflect.TypeOf(obj1) == reflect.TypeOf(obj2) } @@ -542,8 +542,8 @@ func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) { return nil, err } - newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", - hexChunkSize, chunkSignature)) + newChunkHdr := fmt.Appendf(nil, "%s"+s3ChunkSignatureStr+"%s\r\n", + hexChunkSize, chunkSignature) newChunk, err := io.ReadAll(bufReader) if err != nil { return nil, err @@ -564,8 +564,8 @@ func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) { return nil, err } - newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", - hexChunkSize, chunkSignature)) + newChunkHdr := fmt.Appendf(nil, "%s"+s3ChunkSignatureStr+"%s\r\n", + hexChunkSize, chunkSignature) newChunk, err := io.ReadAll(bufReader) if err != nil { return nil, err @@ -590,9 +590,9 @@ func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, err } n := badSize - newHexChunkSize := []byte(fmt.Sprintf("%x", n)) - newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", - newHexChunkSize, chunkSignature)) + newHexChunkSize := fmt.Appendf(nil, "%x", n) + newChunkHdr := fmt.Appendf(nil, "%s"+s3ChunkSignatureStr+"%s\r\n", + newHexChunkSize, chunkSignature) newChunk, err := io.ReadAll(bufReader) if err != nil { return nil, err @@ -1493,7 +1493,7 @@ func getListenNotificationURL(endPoint, bucketName string, prefixes, suffixes, e // getRandomDisks - Creates a slice of N random disks, each of the form - minio-XXX func getRandomDisks(n int) ([]string, error) { var erasureDisks []string - for i := 0; i < n; i++ { + for range n { path, err := os.MkdirTemp(globalTestTmpDir, "minio-") if err != nil { // Remove directories created so far. 
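The loop rewrites in getRandomDisks and the surrounding test hunks rely on Go 1.22's range-over-int: `range n` iterates exactly n times, yielding 0 through n-1 when an index is bound. A quick sketch:

```go
package main

import "fmt"

func main() {
	const n = 3

	// Equivalent to `for i := 0; i < n; i++`.
	for i := range n {
		fmt.Println("index:", i)
	}

	// When the index is unused, drop it entirely.
	for range n {
		fmt.Println("tick")
	}
}
```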
@@ -2119,7 +2119,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { return nil, nil, fmt.Errorf("Missing host parameter") } - publicKey := func(priv interface{}) interface{} { + publicKey := func(priv any) any { switch k := priv.(type) { case *rsa.PrivateKey: return &k.PublicKey @@ -2130,7 +2130,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { } } - pemBlockForKey := func(priv interface{}) *pem.Block { + pemBlockForKey := func(priv any) *pem.Block { switch k := priv.(type) { case *rsa.PrivateKey: return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} @@ -2146,7 +2146,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { } } - var priv interface{} + var priv any var err error priv, err = rsa.GenerateKey(crand.Reader, rsaBits) if err != nil { @@ -2175,8 +2175,8 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { BasicConstraintsValid: true, } - hosts := strings.Split(host, ",") - for _, h := range hosts { + hosts := strings.SplitSeq(host, ",") + for h := range hosts { if ip := net.ParseIP(h); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) } else { diff --git a/cmd/tier.go b/cmd/tier.go index 0b304a4a0..b53095261 100644 --- a/cmd/tier.go +++ b/cmd/tier.go @@ -24,6 +24,7 @@ import ( "encoding/binary" "errors" "fmt" + "maps" "math/rand" "net/http" "path" @@ -495,9 +496,7 @@ func (config *TierConfigMgr) Reload(ctx context.Context, objAPI ObjectLayer) err // Remove existing tier configs clear(config.Tiers) // Copy over the new tier configs - for tier, cfg := range newConfig.Tiers { - config.Tiers[tier] = cfg - } + maps.Copy(config.Tiers, newConfig.Tiers) config.lastRefreshedAt = UTCNow() return nil } diff --git a/cmd/tier_test.go b/cmd/tier_test.go index 9cf62b8d9..2b2345c2a 100644 --- a/cmd/tier_test.go +++ b/cmd/tier_test.go @@ -27,10 +27,10 @@ func TestTierMetrics(t *testing.T) { globalTierMetrics.Observe(tier, 200*time.Millisecond) expSuccess := 10 expFailure := 5 - for i := 0; i < expSuccess; i++ { + for range expSuccess { globalTierMetrics.logSuccess(tier) } - for i := 0; i < expFailure; i++ { + for range expFailure { globalTierMetrics.logFailure(tier) } metrics := globalTierMetrics.Report() diff --git a/cmd/user-provider-utils.go b/cmd/user-provider-utils.go index 8f2ad5358..086b8de8e 100644 --- a/cmd/user-provider-utils.go +++ b/cmd/user-provider-utils.go @@ -81,7 +81,7 @@ func guessUserProvider(credentials auth.Credentials) string { } // getProviderInfoFromClaims - returns the provider info from the claims. 
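`strings.SplitSeq` (Go 1.24) returns an iterator (`iter.Seq[string]`) instead of a `[]string`, so the generateTLSCertKey hunk above can range over the host list without allocating the slice first. A minimal sketch with an illustrative host list:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	host := "localhost,127.0.0.1,::1"

	// Yields one substring per iteration; no intermediate slice.
	for h := range strings.SplitSeq(host, ",") {
		fmt.Println(h)
	}
}
```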
-func populateProviderInfoFromClaims(claims map[string]interface{}, provider string, resp *madmin.InfoAccessKeyResp) { +func populateProviderInfoFromClaims(claims map[string]any, provider string, resp *madmin.InfoAccessKeyResp) { resp.UserProvider = provider switch provider { case madmin.LDAPProvider: @@ -91,7 +91,7 @@ func populateProviderInfoFromClaims(claims map[string]interface{}, provider stri } } -func getOpenIDCfgNameFromClaims(claims map[string]interface{}) (string, bool) { +func getOpenIDCfgNameFromClaims(claims map[string]any) (string, bool) { roleArn := claims[roleArnClaim] s := globalServerConfig.Clone() @@ -107,7 +107,7 @@ func getOpenIDCfgNameFromClaims(claims map[string]interface{}) (string, bool) { return "", false } -func getOpenIDInfoFromClaims(claims map[string]interface{}) madmin.OpenIDSpecificAccessKeyInfo { +func getOpenIDInfoFromClaims(claims map[string]any) madmin.OpenIDSpecificAccessKeyInfo { info := madmin.OpenIDSpecificAccessKeyInfo{} cfgName, ok := getOpenIDCfgNameFromClaims(claims) @@ -130,7 +130,7 @@ func getOpenIDInfoFromClaims(claims map[string]interface{}) madmin.OpenIDSpecifi return info } -func getLDAPInfoFromClaims(claims map[string]interface{}) madmin.LDAPSpecificAccessKeyInfo { +func getLDAPInfoFromClaims(claims map[string]any) madmin.LDAPSpecificAccessKeyInfo { info := madmin.LDAPSpecificAccessKeyInfo{} if name, ok := claims[ldapUser].(string); ok { diff --git a/cmd/utils.go b/cmd/utils.go index 833f4ac87..b7e7f79eb 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -28,6 +28,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/url" "os" @@ -36,7 +37,7 @@ import ( "runtime" "runtime/pprof" "runtime/trace" - "sort" + "slices" "strings" "sync" "time" @@ -218,9 +219,7 @@ func path2BucketObject(s string) (bucket, prefix string) { // If input is nil an empty map is returned, not nil. func cloneMSS(v map[string]string) map[string]string { r := make(map[string]string, len(v)) - for k, v := range v { - r[k] = v - } + maps.Copy(r, v) return r } @@ -237,7 +236,7 @@ func nopCharsetConverter(label string, input io.Reader) (io.Reader, error) { } // xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}, size int64) error { +func xmlDecoder(body io.Reader, v any, size int64) error { var lbody io.Reader if size > 0 { lbody = io.LimitReader(body, size) @@ -844,10 +843,7 @@ func lcp(strs []string, pre bool) string { return "" } // maximum possible length - maxl := xfixl - if strl < maxl { - maxl = strl - } + maxl := min(strl, xfixl) // compare letters if pre { // prefix, iterate left to right @@ -953,7 +949,7 @@ func auditLogInternal(ctx context.Context, opts AuditLogOptions) { entry.API.Bucket = opts.Bucket entry.API.Objects = []xaudit.ObjectVersion{{ObjectName: opts.Object, VersionID: opts.VersionID}} entry.API.Status = opts.Status - entry.Tags = make(map[string]interface{}, len(opts.Tags)) + entry.Tags = make(map[string]any, len(opts.Tags)) for k, v := range opts.Tags { entry.Tags[k] = v } @@ -1188,8 +1184,6 @@ func mapKeysSorted[Map ~map[K]V, K ordered, V any](m Map) []K { for k := range m { res = append(res, k) } - sort.Slice(res, func(i, j int) bool { - return res[i] < res[j] - }) + slices.Sort(res) return res } diff --git a/cmd/utils_test.go b/cmd/utils_test.go index bdbf17f4d..6d4e26a38 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -163,7 +163,6 @@ func TestPath2BucketObjectName(t *testing.T) { // Validate all test cases. 
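The cmd/utils.go hunks above bundle three Go 1.21 replacements: `maps.Copy` for the manual map-copy loop in cloneMSS, the `min` builtin for the if-based clamp in lcp, and `slices.Sort` for the `sort.Slice` call in mapKeysSorted. A combined sketch with illustrative data:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	// maps.Copy copies all key/value pairs from src into dst.
	src := map[string]string{"a": "1", "b": "2"}
	dst := make(map[string]string, len(src))
	maps.Copy(dst, src)

	// The min builtin replaces the two-branch clamp.
	maxl := min(10, 7) // 7

	// slices.Sort replaces sort.Slice with a `<` comparator for any
	// ordered element type.
	keys := []string{"c", "a", "b"}
	slices.Sort(keys)

	fmt.Println(dst, maxl, keys)
}
```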
for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { bucketName, objectName := path2BucketObject(testCase.path) if bucketName != testCase.bucket { diff --git a/cmd/xl-storage-format-utils_test.go b/cmd/xl-storage-format-utils_test.go index 91a5e4019..12f363153 100644 --- a/cmd/xl-storage-format-utils_test.go +++ b/cmd/xl-storage-format-utils_test.go @@ -68,7 +68,7 @@ func Test_hashDeterministicString(t *testing.T) { const n = 100 want := hashDeterministicString(tt.arg) m := tt.arg - for i := 0; i < n; i++ { + for range n { if got := hashDeterministicString(m); got != want { t.Errorf("hashDeterministicString() = %v, want %v", got, want) } @@ -147,7 +147,7 @@ func TestGetFileInfoVersions(t *testing.T) { xl := xlMetaV2{} var versions []FileInfo var allVersionIDs, freeVersionIDs []string - for i := 0; i < 5; i++ { + for i := range 5 { fi := basefi fi.VersionID = mustGetUUID() fi.DataDir = mustGetUUID() diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go index 128f1c096..7d26b13e5 100644 --- a/cmd/xl-storage-format-v2.go +++ b/cmd/xl-storage-format-v2.go @@ -786,10 +786,7 @@ func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) { } // CRC is variable length, so we need to truncate exactly that. - wantMax := want + msgp.Uint32Size - if wantMax > size { - wantMax = size - } + wantMax := min(want+msgp.Uint32Size, size) if err := readMore(wantMax); err != nil { return nil, err } diff --git a/cmd/xl-storage-format-v2_test.go b/cmd/xl-storage-format-v2_test.go index c9fa34f1c..6a9b15a3f 100644 --- a/cmd/xl-storage-format-v2_test.go +++ b/cmd/xl-storage-format-v2_test.go @@ -429,7 +429,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { mergeXLV2Versions(8, false, 0, vers...) } }) @@ -438,7 +438,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { mergeXLV2Versions(8, false, 1, vers...) } }) @@ -447,7 +447,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { mergeXLV2Versions(8, false, 1, vers...) } }) @@ -469,7 +469,7 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { err = xl.Load(data) if err != nil { b.Fatal(err) @@ -490,7 +490,7 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { err = xl.Load(data) if err != nil { b.Fatal(err) @@ -1018,7 +1018,7 @@ func Test_mergeXLV2Versions2(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { // Run multiple times, shuffling the input order. - for i := int64(0); i < 50; i++ { + for i := range int64(50) { t.Run(fmt.Sprint(i), func(t *testing.T) { rng := rand.New(rand.NewSource(i)) rng.Shuffle(len(test.input), func(i, j int) { @@ -1067,7 +1067,7 @@ func Test_mergeEntryChannels(t *testing.T) { } // Shuffle... 
- for i := 0; i < 100; i++ { + for i := range 100 { rng := rand.New(rand.NewSource(int64(i))) rng.Shuffle(len(vers), func(i, j int) { vers[i], vers[j] = vers[j], vers[i] @@ -1174,7 +1174,7 @@ func benchmarkManyPartsOptionally(b *testing.B, allParts bool) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err = buf.ToFileInfo("volume", "path", "", allParts) if err != nil { b.Fatal(err) diff --git a/cmd/xl-storage-format_test.go b/cmd/xl-storage-format_test.go index 9ba58d769..12f4b052e 100644 --- a/cmd/xl-storage-format_test.go +++ b/cmd/xl-storage-format_test.go @@ -136,7 +136,7 @@ func getSampleXLMeta(totalParts int) xlMetaV1Object { xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts) // total number of parts. xlMeta.Parts = make([]ObjectPartInfo, totalParts) - for i := 0; i < totalParts; i++ { + for i := range totalParts { // hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.meta the magnitude doesn't affect the test, // The magnitude doesn't make a difference, only the size does. xlMeta.AddTestObjectCheckSum(i+1, BLAKE2b512, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a") @@ -378,7 +378,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.Run(fmt.Sprint(size, "-versions"), func(b *testing.B) { var xl xlMetaV2 ids := make([]string, size) - for i := 0; i < size; i++ { + for i := range size { fi.VersionID = mustGetUUID() fi.DataDir = mustGetUUID() ids[i] = fi.VersionID @@ -397,7 +397,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -424,7 +424,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -449,7 +449,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -476,7 +476,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -494,7 +494,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... 
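The benchmark rewrites here use `testing.B.Loop` (Go 1.24): `for b.Loop()` replaces the `for i := 0; i < b.N; i++` counter and times only the loop body, which should also make the `b.ResetTimer()` calls kept in these hunks redundant rather than required. A minimal sketch:

```go
package bench_test

import "testing"

var sink string // package-level sink so the work is not optimized away

func BenchmarkConcat(b *testing.B) {
	x, y := "hello, ", "world" // setup before the loop is not timed
	for b.Loop() {
		sink = x + y
	}
	_ = sink
}
```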
xl = xlMetaV2{} err := xl.Load(enc) @@ -512,7 +512,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { buf, _, _ := isIndexedMetaV2(enc) if buf == nil { b.Fatal("buf == nil") @@ -527,7 +527,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { buf, _, _ := isIndexedMetaV2(enc) if buf == nil { b.Fatal("buf == nil") diff --git a/cmd/xl-storage-meta-inline.go b/cmd/xl-storage-meta-inline.go index 76b1f8a79..06ed65e8d 100644 --- a/cmd/xl-storage-meta-inline.go +++ b/cmd/xl-storage-meta-inline.go @@ -20,6 +20,7 @@ package cmd import ( "errors" "fmt" + "slices" "github.com/tinylib/msgp/msgp" ) @@ -56,7 +57,7 @@ func (x xlMetaInlineData) find(key string) []byte { if err != nil || sz == 0 { return nil } - for i := uint32(0); i < sz; i++ { + for range sz { var found []byte found, buf, err = msgp.ReadMapKeyZC(buf) if err != nil || sz == 0 { @@ -91,7 +92,7 @@ func (x xlMetaInlineData) validate() error { return fmt.Errorf("xlMetaInlineData: %w", err) } - for i := uint32(0); i < sz; i++ { + for i := range sz { var key []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -131,7 +132,7 @@ func (x *xlMetaInlineData) repair() { // Remove all current data keys := make([][]byte, 0, sz) vals := make([][]byte, 0, sz) - for i := uint32(0); i < sz; i++ { + for range sz { var key, val []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -165,7 +166,7 @@ func (x xlMetaInlineData) list() ([]string, error) { return nil, err } keys := make([]string, 0, sz) - for i := uint32(0); i < sz; i++ { + for i := range sz { var key []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -231,7 +232,7 @@ func (x *xlMetaInlineData) replace(key string, value []byte) { // Version plus header... plSize := 1 + msgp.MapHeaderSize replaced := false - for i := uint32(0); i < sz; i++ { + for range sz { var found, foundVal []byte var err error found, buf, err = msgp.ReadMapKeyZC(buf) @@ -276,7 +277,7 @@ func (x *xlMetaInlineData) rename(oldKey, newKey string) bool { // Version plus header... plSize := 1 + msgp.MapHeaderSize found := false - for i := uint32(0); i < sz; i++ { + for range sz { var foundKey, foundVal []byte var err error foundKey, buf, err = msgp.ReadMapKeyZC(buf) @@ -329,19 +330,14 @@ func (x *xlMetaInlineData) remove(keys ...string) bool { } } else { removeKey = func(s []byte) bool { - for _, key := range keys { - if key == string(s) { - return true - } - } - return false + return slices.Contains(keys, string(s)) } } // Version plus header... plSize := 1 + msgp.MapHeaderSize found := false - for i := uint32(0); i < sz; i++ { + for range sz { var foundKey, foundVal []byte var err error foundKey, buf, err = msgp.ReadMapKeyZC(buf) diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go index 476e5e27b..c27647f24 100644 --- a/cmd/xl-storage_test.go +++ b/cmd/xl-storage_test.go @@ -1169,7 +1169,7 @@ func TestXLStorageReadFile(t *testing.T) { } } - for l := 0; l < 2; l++ { + for range 2 { // Following block validates all ReadFile test cases. 
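The remove() rewrite above shows the general pattern for hand-rolled membership loops: `slices.Contains` (Go 1.21) returns true as soon as any element equals the probe value. A small sketch, with made-up key names:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	keys := []string{"part.1", "part.2"}

	// Equivalent to looping over keys and comparing each one, with an
	// early break on the first match.
	removeKey := func(s []byte) bool {
		return slices.Contains(keys, string(s))
	}

	fmt.Println(removeKey([]byte("part.2"))) // true
	fmt.Println(removeKey([]byte("part.3"))) // false
}
```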
for i, testCase := range testCases { var n int64 diff --git a/docs/debugging/healing-bin/main.go b/docs/debugging/healing-bin/main.go index ac22fa2ab..e0e4e84f2 100644 --- a/docs/debugging/healing-bin/main.go +++ b/docs/debugging/healing-bin/main.go @@ -56,7 +56,7 @@ FLAGS: cli.ShowAppHelpAndExit(c, 1) // last argument is exit code } - ht := make(map[string]map[string]interface{}) + ht := make(map[string]map[string]any) file := c.Args().Get(0) if strings.HasSuffix(file, ".zip") { var sz int64 @@ -91,7 +91,7 @@ FLAGS: dec := json.NewDecoder(buf) // Use number to preserve integers. dec.UseNumber() - var htr map[string]interface{} + var htr map[string]any if err = dec.Decode(&htr); err != nil { return err } @@ -113,7 +113,7 @@ FLAGS: if _, err = msgp.CopyToJSON(buf, bytes.NewReader(b)); err != nil { return err } - var htr map[string]interface{} + var htr map[string]any dec := json.NewDecoder(buf) // Use number to preserve integers. dec.UseNumber() diff --git a/docs/debugging/xl-meta/main.go b/docs/debugging/xl-meta/main.go index 4ac25b4b1..f25cce17d 100644 --- a/docs/debugging/xl-meta/main.go +++ b/docs/debugging/xl-meta/main.go @@ -31,6 +31,7 @@ import ( "os" "path/filepath" "regexp" + "slices" "sort" "strconv" "strings" @@ -309,7 +310,7 @@ FLAGS: if ndjson { return buf.Bytes(), nil } - var msi map[string]interface{} + var msi map[string]any dec := json.NewDecoder(buf) // Use number to preserve integers. dec.UseNumber() @@ -390,7 +391,7 @@ FLAGS: if err != nil { return err } - var tmp map[string]interface{} + var tmp map[string]any if err := json.Unmarshal(b2, &tmp); err == nil { if b3, err := json.Marshal(tmp); err == nil { b2 = b3 @@ -578,7 +579,7 @@ func (x xlMetaInlineData) json(value bool) ([]byte, error) { } res := []byte("{") - for i := uint32(0); i < sz; i++ { + for i := range sz { var key, val []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -643,7 +644,7 @@ func (x xlMetaInlineData) files(fn func(name string, data []byte)) error { return err } - for i := uint32(0); i < sz; i++ { + for i := range sz { var key, val []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -703,7 +704,7 @@ func decodeXLHeaders(buf []byte) (x xlHeaders, b []byte, err error) { // Any non-nil error is returned. func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) { var tHdr, tMeta []byte // Zero copy bytes - for i := 0; i < versions; i++ { + for i := range versions { tHdr, buf, err = msgp.ReadBytesZC(buf) if err != nil { return err @@ -985,12 +986,9 @@ func combine(files []string, out string) error { } ok := len(splitFilled) for i, sh := range splitFilled { - for _, v := range sh { - if v == 0 { - split[i] = nil - ok-- - break - } + if slices.Contains(sh, 0) { + split[i] = nil + ok-- } } hasParity := 0 @@ -1246,7 +1244,7 @@ func combineCrossVer(all map[string][]string, baseName string) error { } // Fill padding... 
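Both debugging tools above decode unknown JSON into `map[string]any` with `dec.UseNumber()`. Without it, encoding/json stores every number as float64 and large integers lose precision; with it, numbers stay as json.Number strings. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	in := `{"size": 9007199254740993}` // 2^53+1: not exactly representable as float64

	var m map[string]any
	dec := json.NewDecoder(strings.NewReader(in))
	dec.UseNumber() // numbers decode as json.Number, not float64
	if err := dec.Decode(&m); err != nil {
		panic(err)
	}

	fmt.Printf("%T %v\n", m["size"], m["size"]) // json.Number 9007199254740993
}
```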
padding := len(splitFilled[0])*k - len(m.filled) - for i := 0; i < padding; i++ { + for i := range padding { arr := splitFilled[k-1] arr[len(arr)-i-1] = 1 } diff --git a/internal/amztime/iso8601_time_test.go b/internal/amztime/iso8601_time_test.go index 73270a4e7..34c60204c 100644 --- a/internal/amztime/iso8601_time_test.go +++ b/internal/amztime/iso8601_time_test.go @@ -46,7 +46,6 @@ func TestISO8601Format(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.expectedOutput, func(t *testing.T) { gotOutput := ISO8601Format(testCase.date) t.Log("Go", testCase.date.Format(iso8601TimeFormat)) diff --git a/internal/amztime/parse_test.go b/internal/amztime/parse_test.go index f5716e3c0..144ef81c6 100644 --- a/internal/amztime/parse_test.go +++ b/internal/amztime/parse_test.go @@ -44,7 +44,6 @@ func TestParse(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.timeStr, func(t *testing.T) { gott, goterr := Parse(testCase.timeStr) if !errors.Is(goterr, testCase.expectedErr) { diff --git a/internal/auth/credentials.go b/internal/auth/credentials.go index 2c8bcb05b..764b7cad0 100644 --- a/internal/auth/credentials.go +++ b/internal/auth/credentials.go @@ -111,16 +111,16 @@ const ( // Credentials holds access and secret keys. type Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty" yaml:"-"` - Status string `xml:"-" json:"status,omitempty"` - ParentUser string `xml:"-" json:"parentUser,omitempty"` - Groups []string `xml:"-" json:"groups,omitempty"` - Claims map[string]interface{} `xml:"-" json:"claims,omitempty"` - Name string `xml:"-" json:"name,omitempty"` - Description string `xml:"-" json:"description,omitempty"` + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"` + Expiration time.Time `xml:"Expiration" json:"expiration" yaml:"-"` + Status string `xml:"-" json:"status,omitempty"` + ParentUser string `xml:"-" json:"parentUser,omitempty"` + Groups []string `xml:"-" json:"groups,omitempty"` + Claims map[string]any `xml:"-" json:"claims,omitempty"` + Name string `xml:"-" json:"name,omitempty"` + Description string `xml:"-" json:"description,omitempty"` // Deprecated: In favor of Description - when reading credentials from // storage the value of this field is placed in the Description field above @@ -196,7 +196,7 @@ var timeSentinel = time.Unix(0, 0).UTC() var ErrInvalidDuration = errors.New("invalid token expiry") // ExpToInt64 - convert input interface value to int64. -func ExpToInt64(expI interface{}) (expAt int64, err error) { +func ExpToInt64(expI any) (expAt int64, err error) { switch exp := expI.(type) { case string: expAt, err = strconv.ParseInt(exp, 10, 64) @@ -293,7 +293,7 @@ func GenerateSecretKey(length int, random io.Reader) (string, error) { } // GetNewCredentialsWithMetadata generates and returns new credential with expiry. 
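Alongside the `map[string]interface{}` to `map[string]any` change, the Credentials struct above drops `omitempty` from the Expiration tag. That tag was a no-op to begin with: encoding/json only treats false, 0, nil, and empty strings/slices/maps/arrays as empty, never struct values such as time.Time, so presumably the tag was removed to stop implying behavior it never had. A sketch showing both tags marshal identically:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type cred struct {
	WithTag    time.Time `json:"withTag,omitempty"` // omitempty never fires for struct fields
	WithoutTag time.Time `json:"withoutTag"`
}

func main() {
	b, _ := json.Marshal(cred{})
	fmt.Println(string(b))
	// {"withTag":"0001-01-01T00:00:00Z","withoutTag":"0001-01-01T00:00:00Z"}
}
```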
-func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) (Credentials, error) { +func GetNewCredentialsWithMetadata(m map[string]any, tokenSecret string) (Credentials, error) { accessKey, secretKey, err := GenerateCredentials() if err != nil { return Credentials{}, err @@ -303,7 +303,7 @@ func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) // CreateNewCredentialsWithMetadata - creates new credentials using the specified access & secret keys // and generate a session token if a secret token is provided. -func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]interface{}, tokenSecret string) (cred Credentials, err error) { +func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]any, tokenSecret string) (cred Credentials, err error) { if len(accessKey) < accessKeyMinLen || len(accessKey) > accessKeyMaxLen { return Credentials{}, ErrInvalidAccessKeyLength } @@ -336,7 +336,7 @@ func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string] } // JWTSignWithAccessKey - generates a session token. -func JWTSignWithAccessKey(accessKey string, m map[string]interface{}, tokenSecret string) (string, error) { +func JWTSignWithAccessKey(accessKey string, m map[string]any, tokenSecret string) (string, error) { m["accessKey"] = accessKey jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m)) return jwt.SignedString([]byte(tokenSecret)) @@ -362,7 +362,7 @@ func ExtractClaims(token, secretKey string) (*jwt.MapClaims, error) { // GetNewCredentials generates and returns new credential. func GetNewCredentials() (cred Credentials, err error) { - return GetNewCredentialsWithMetadata(map[string]interface{}{}, "") + return GetNewCredentialsWithMetadata(map[string]any{}, "") } // CreateCredentials returns new credential with the given access key and secret key. diff --git a/internal/auth/credentials_test.go b/internal/auth/credentials_test.go index 643cab319..9e83f4b59 100644 --- a/internal/auth/credentials_test.go +++ b/internal/auth/credentials_test.go @@ -25,7 +25,7 @@ import ( func TestExpToInt64(t *testing.T) { testCases := []struct { - exp interface{} + exp any expectedFailure bool }{ {"", true}, @@ -42,7 +42,6 @@ func TestExpToInt64(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { _, err := ExpToInt64(testCase.exp) if err != nil && !testCase.expectedFailure { diff --git a/internal/bpool/bpool_test.go b/internal/bpool/bpool_test.go index a91d9ad43..da673017e 100644 --- a/internal/bpool/bpool_test.go +++ b/internal/bpool/bpool_test.go @@ -71,7 +71,7 @@ func TestBytePool(t *testing.T) { } // lets drain the buf channel first before we validate invalid buffers. 
- for i := uint64(0); i < size; i++ { + for range size { bp.Get() // discard } diff --git a/internal/bucket/bandwidth/monitor.go b/internal/bucket/bandwidth/monitor.go index 48a989a2d..b523030bc 100644 --- a/internal/bucket/bandwidth/monitor.go +++ b/internal/bucket/bandwidth/monitor.go @@ -21,6 +21,7 @@ package bandwidth import ( "context" + "slices" "sync" "time" @@ -83,12 +84,7 @@ func SelectBuckets(buckets ...string) SelectionFunction { } } return func(bucket string) bool { - for _, bkt := range buckets { - if bkt == bucket { - return true - } - } - return false + return slices.Contains(buckets, bucket) } } diff --git a/internal/bucket/bandwidth/monitor_test.go b/internal/bucket/bandwidth/monitor_test.go index fbeac14e7..4799ce073 100644 --- a/internal/bucket/bandwidth/monitor_test.go +++ b/internal/bucket/bandwidth/monitor_test.go @@ -99,7 +99,6 @@ func TestMonitor_GetReport(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() thr := bucketThrottle{ diff --git a/internal/bucket/lifecycle/error.go b/internal/bucket/lifecycle/error.go index 9676871a4..c3a8572f0 100644 --- a/internal/bucket/lifecycle/error.go +++ b/internal/bucket/lifecycle/error.go @@ -29,7 +29,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { return Error{err: fmt.Errorf(format, a...)} } diff --git a/internal/bucket/lifecycle/lifecycle_test.go b/internal/bucket/lifecycle/lifecycle_test.go index 5f0f590f8..2af5556ed 100644 --- a/internal/bucket/lifecycle/lifecycle_test.go +++ b/internal/bucket/lifecycle/lifecycle_test.go @@ -738,7 +738,6 @@ func TestEval(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run("", func(t *testing.T) { lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { @@ -823,7 +822,6 @@ func TestHasActiveRules(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) { lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { diff --git a/internal/bucket/object/lock/lock.go b/internal/bucket/object/lock/lock.go index 79f1421f5..b0d929f76 100644 --- a/internal/bucket/object/lock/lock.go +++ b/internal/bucket/object/lock/lock.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/textproto" "strings" @@ -601,9 +602,7 @@ func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filte } if !copied { dst = make(map[string]string, len(metadata)) - for k, v := range metadata { - dst[k] = v - } + maps.Copy(dst, metadata) copied = true } delete(dst, key) diff --git a/internal/bucket/object/lock/lock_test.go b/internal/bucket/object/lock/lock_test.go index e31586a76..be7975e28 100644 --- a/internal/bucket/object/lock/lock_test.go +++ b/internal/bucket/object/lock/lock_test.go @@ -174,7 +174,6 @@ func TestParseObjectLockConfig(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run("", func(t *testing.T) { _, err := ParseObjectLockConfig(strings.NewReader(tt.value)) //nolint:gocritic @@ -219,7 +218,6 @@ func TestParseObjectRetention(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run("", func(t *testing.T) { _, err := ParseObjectRetention(strings.NewReader(tt.value)) //nolint:gocritic diff --git a/internal/bucket/replication/error.go 
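The deleted `tt := tt` and `testCase := testCase` lines in these test hunks were the classic pre-Go-1.22 guard against closures capturing a shared loop variable. Since Go 1.22 each iteration gets a fresh variable, so the copies are dead code. A sketch of the capture that used to go wrong:

```go
package main

import "fmt"

func main() {
	tests := []string{"a", "b", "c"}
	done := make(chan struct{})

	for _, tt := range tests {
		// Before Go 1.22 this goroutine could observe whichever value
		// tt held when it happened to run; now each iteration has its
		// own tt, so no manual copy is needed.
		go func() {
			fmt.Println(tt)
			done <- struct{}{}
		}()
	}
	for range tests {
		<-done
	}
}
```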
b/internal/bucket/replication/error.go index 7d5178d84..b653c8630 100644 --- a/internal/bucket/replication/error.go +++ b/internal/bucket/replication/error.go @@ -29,7 +29,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { return Error{err: fmt.Errorf(format, a...)} } diff --git a/internal/bucket/replication/replication_test.go b/internal/bucket/replication/replication_test.go index 26c72b28d..7732f8caa 100644 --- a/internal/bucket/replication/replication_test.go +++ b/internal/bucket/replication/replication_test.go @@ -296,7 +296,6 @@ func TestReplicate(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.opts.Name, func(t *testing.T) { result := testCase.c.Replicate(testCase.opts) if result != testCase.expectedResult { @@ -352,7 +351,6 @@ func TestHasActiveRules(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) { cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { @@ -402,7 +400,6 @@ func TestFilterActionableRules(t *testing.T) { }, } for _, tc := range testCases { - tc := tc cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { t.Fatalf("Got unexpected error: %v", err) diff --git a/internal/bucket/replication/rule.go b/internal/bucket/replication/rule.go index 0c6b6bd04..347dfe28d 100644 --- a/internal/bucket/replication/rule.go +++ b/internal/bucket/replication/rule.go @@ -139,7 +139,7 @@ type Rule struct { Destination Destination `xml:"Destination" json:"Destination"` SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` Filter Filter `xml:"Filter" json:"Filter"` - ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` + ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication"` } var ( diff --git a/internal/bucket/replication/rule_test.go b/internal/bucket/replication/rule_test.go index 0e883e4e9..32722c970 100644 --- a/internal/bucket/replication/rule_test.go +++ b/internal/bucket/replication/rule_test.go @@ -57,7 +57,6 @@ func TestMetadataReplicate(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) { cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { diff --git a/internal/bucket/versioning/error.go b/internal/bucket/versioning/error.go index 6b652c0ae..20bb4caa2 100644 --- a/internal/bucket/versioning/error.go +++ b/internal/bucket/versioning/error.go @@ -29,7 +29,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { return Error{err: fmt.Errorf(format, a...)} } diff --git a/internal/color/color.go b/internal/color/color.go index d7dae3b4b..e45851e9e 100644 --- a/internal/color/color.go +++ b/internal/color/color.go @@ -31,119 +31,119 @@ var ( return !color.NoColor } - Bold = func() func(format string, a ...interface{}) string { + Bold = func() func(format string, a ...any) string { if IsTerminal() { return 
color.New(color.Bold).SprintfFunc() } return fmt.Sprintf }() - RedBold = func() func(a ...interface{}) string { + RedBold = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgRed, color.Bold).SprintFunc() } return fmt.Sprint }() - RedBoldf = func() func(format string, a ...interface{}) string { + RedBoldf = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgRed, color.Bold).SprintfFunc() } return fmt.Sprintf }() - Red = func() func(format string, a ...interface{}) string { + Red = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgRed).SprintfFunc() } return fmt.Sprintf }() - Blue = func() func(format string, a ...interface{}) string { + Blue = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgBlue).SprintfFunc() } return fmt.Sprintf }() - Yellow = func() func(format string, a ...interface{}) string { + Yellow = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgYellow).SprintfFunc() } return fmt.Sprintf }() - Green = func() func(a ...interface{}) string { + Green = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgGreen).SprintFunc() } return fmt.Sprint }() - Greenf = func() func(format string, a ...interface{}) string { + Greenf = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgGreen).SprintfFunc() } return fmt.Sprintf }() - GreenBold = func() func(a ...interface{}) string { + GreenBold = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgGreen, color.Bold).SprintFunc() } return fmt.Sprint }() - CyanBold = func() func(a ...interface{}) string { + CyanBold = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgCyan, color.Bold).SprintFunc() } return fmt.Sprint }() - YellowBold = func() func(format string, a ...interface{}) string { + YellowBold = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgYellow, color.Bold).SprintfFunc() } return fmt.Sprintf }() - BlueBold = func() func(format string, a ...interface{}) string { + BlueBold = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgBlue, color.Bold).SprintfFunc() } return fmt.Sprintf }() - BgYellow = func() func(format string, a ...interface{}) string { + BgYellow = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.BgYellow).SprintfFunc() } return fmt.Sprintf }() - Black = func() func(format string, a ...interface{}) string { + Black = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgBlack).SprintfFunc() } return fmt.Sprintf }() - FgRed = func() func(a ...interface{}) string { + FgRed = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgRed).SprintFunc() } return fmt.Sprint }() - BgRed = func() func(format string, a ...interface{}) string { + BgRed = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.BgRed).SprintfFunc() } return fmt.Sprintf }() - FgWhite = func() func(format string, a ...interface{}) string { + FgWhite = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgWhite).SprintfFunc() } diff --git a/internal/config/api/api.go b/internal/config/api/api.go index d3ab6a1fb..f203f7e95 100644 --- a/internal/config/api/api.go +++ b/internal/config/api/api.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "math" + 
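These color.go signature changes compile without touching the right-hand sides because `any` is an alias: `func(format string, a ...any) string` and the `func(format string, a ...interface{}) string` returned by fatih/color's SprintfFunc are the same type. A minimal sketch (the message text is illustrative):

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

// Assigning SprintfFunc's result (declared with ...interface{}) to a
// ...any function variable needs no conversion; the types are identical.
var redf func(format string, a ...any) string = color.New(color.FgRed).SprintfFunc()

func main() {
	fmt.Println(redf("failed after %d retries", 3))
}
```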
"slices" "strconv" "strings" "time" @@ -224,10 +225,8 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { corsAllowOrigin = []string{"*"} // defaults to '*' } else { corsAllowOrigin = strings.Split(corsList, ",") - for _, cors := range corsAllowOrigin { - if cors == "" { - return cfg, errors.New("invalid cors value") - } + if slices.Contains(corsAllowOrigin, "") { + return cfg, errors.New("invalid cors value") } } cfg.CorsAllowOrigin = corsAllowOrigin diff --git a/internal/config/compress/compress_test.go b/internal/config/compress/compress_test.go index 124d182db..12daa9f16 100644 --- a/internal/config/compress/compress_test.go +++ b/internal/config/compress/compress_test.go @@ -41,7 +41,6 @@ func TestParseCompressIncludes(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.str, func(t *testing.T) { gotPatterns, err := parseCompressIncludes(testCase.str) if !testCase.success && err == nil { diff --git a/internal/config/config.go b/internal/config/config.go index 1a23f564f..0b158aebe 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -21,7 +21,9 @@ import ( "bufio" "fmt" "io" + "maps" "regexp" + "slices" "sort" "strings" "sync" @@ -61,7 +63,7 @@ type ErrConfigNotFound struct { func Error[T ErrorConfig, PT interface { *T setMsg(string) -}](format string, vals ...interface{}, +}](format string, vals ...any, ) T { pt := PT(new(T)) pt.setMsg(fmt.Sprintf(format, vals...)) @@ -69,7 +71,7 @@ func Error[T ErrorConfig, PT interface { } // Errorf formats an error and returns it as a generic config error -func Errorf(format string, vals ...interface{}) ErrConfigGeneric { +func Errorf(format string, vals ...any) ErrConfigGeneric { return Error[ErrConfigGeneric](format, vals...) } @@ -238,9 +240,7 @@ var DefaultKVS = map[string]KVS{} // globally, this should be called only once preferably // during `init()`. func RegisterDefaultKVS(kvsMap map[string]KVS) { - for subSys, kvs := range kvsMap { - DefaultKVS[subSys] = kvs - } + maps.Copy(DefaultKVS, kvsMap) } // HelpSubSysMap - help for all individual KVS for each sub-systems @@ -253,9 +253,7 @@ var HelpSubSysMap = map[string]HelpKVS{} // this function should be called only once // preferably in during `init()`. func RegisterHelpSubSys(helpKVSMap map[string]HelpKVS) { - for subSys, hkvs := range helpKVSMap { - HelpSubSysMap[subSys] = hkvs - } + maps.Copy(HelpSubSysMap, helpKVSMap) } // HelpDeprecatedSubSysMap - help for all deprecated sub-systems, that may be @@ -265,9 +263,7 @@ var HelpDeprecatedSubSysMap = map[string]HelpKV{} // RegisterHelpDeprecatedSubSys - saves input help KVS for deprecated // sub-systems globally. Should be called only once at init. func RegisterHelpDeprecatedSubSys(helpDeprecatedKVMap map[string]HelpKV) { - for k, v := range helpDeprecatedKVMap { - HelpDeprecatedSubSysMap[k] = v - } + maps.Copy(HelpDeprecatedSubSysMap, helpDeprecatedKVMap) } // KV - is a shorthand of each key value. 
@@ -353,9 +349,7 @@ func Merge(cfgKVS map[string]KVS, envname string, defaultKVS KVS) map[string]KVS } newCfgKVS[tgt] = defaultKVS } - for tgt, kv := range cfgKVS { - newCfgKVS[tgt] = kv - } + maps.Copy(newCfgKVS, cfgKVS) return newCfgKVS } @@ -642,11 +636,8 @@ func CheckValidKeys(subSys string, kv KVS, validKVS KVS, deprecatedKeys ...strin continue } var skip bool - for _, deprecatedKey := range deprecatedKeys { - if kv.Key == deprecatedKey { - skip = true - break - } + if slices.Contains(deprecatedKeys, kv.Key) { + skip = true } if skip { continue @@ -852,7 +843,7 @@ func (c Config) DelKVS(s string) error { if len(inputs) == 2 { currKVS := ck.Clone() defKVS := DefaultKVS[subSys] - for _, delKey := range strings.Fields(inputs[1]) { + for delKey := range strings.FieldsSeq(inputs[1]) { _, ok := currKVS.Lookup(delKey) if !ok { return Error[ErrConfigNotFound]("key %s doesn't exist", delKey) @@ -1407,13 +1398,7 @@ func (c Config) GetSubsysInfo(subSys, target string, redactSecrets bool) ([]Subs } if target != "" { - found := false - for _, t := range targets { - if t == target { - found = true - break - } - } + found := slices.Contains(targets, target) if !found { return nil, Errorf("there is no target `%s` for subsystem `%s`", target, subSys) } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 9a0a3f65e..e55d446b3 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -88,7 +88,6 @@ func TestKVFields(t *testing.T) { }, } for _, test := range tests { - test := test t.Run("", func(t *testing.T) { gotFields := kvFields(test.input, test.keys) if len(gotFields) != len(test.expectedFields) { diff --git a/internal/config/crypto_test.go b/internal/config/crypto_test.go index 75dbe9a6b..224d230bd 100644 --- a/internal/config/crypto_test.go +++ b/internal/config/crypto_test.go @@ -100,7 +100,7 @@ func BenchmarkEncrypt(b *testing.B) { context = kms.Context{"key": "value"} ) b.SetBytes(int64(size)) - for i := 0; i < b.N; i++ { + for b.Loop() { ciphertext, err := Encrypt(KMS, plaintext, context) if err != nil { b.Fatal(err) diff --git a/internal/config/errors-utils.go b/internal/config/errors-utils.go index 9ae52d8f9..3d75ab73e 100644 --- a/internal/config/errors-utils.go +++ b/internal/config/errors-utils.go @@ -65,7 +65,7 @@ func (u Err) Msg(m string) Err { } // Msgf - Replace the current error's message -func (u Err) Msgf(m string, args ...interface{}) Err { +func (u Err) Msgf(m string, args ...any) Err { e := u.Clone() if len(args) == 0 { e.msg = m @@ -76,7 +76,7 @@ func (u Err) Msgf(m string, args ...interface{}) Err { } // Hint - Replace the current error's message -func (u Err) Hint(m string, args ...interface{}) Err { +func (u Err) Hint(m string, args ...any) Err { e := u.Clone() e.hint = fmt.Sprintf(m, args...) 
return e diff --git a/internal/config/etcd/etcd_test.go b/internal/config/etcd/etcd_test.go index d9889ca96..e7aad0f79 100644 --- a/internal/config/etcd/etcd_test.go +++ b/internal/config/etcd/etcd_test.go @@ -49,7 +49,6 @@ func TestParseEndpoints(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.s, func(t *testing.T) { endpoints, secure, err := parseEndpoints(testCase.s) if err != nil && testCase.success { diff --git a/internal/config/identity/openid/jwt.go b/internal/config/identity/openid/jwt.go index 0be788d8c..2a422010f 100644 --- a/internal/config/identity/openid/jwt.go +++ b/internal/config/identity/openid/jwt.go @@ -38,7 +38,7 @@ type publicKeys struct { *sync.RWMutex // map of kid to public key - pkMap map[string]interface{} + pkMap map[string]any } func (pk *publicKeys) parseAndAdd(b io.Reader) error { @@ -59,14 +59,14 @@ func (pk *publicKeys) parseAndAdd(b io.Reader) error { return nil } -func (pk *publicKeys) add(keyID string, key interface{}) { +func (pk *publicKeys) add(keyID string, key any) { pk.Lock() defer pk.Unlock() pk.pkMap[keyID] = key } -func (pk *publicKeys) get(kid string) interface{} { +func (pk *publicKeys) get(kid string) any { pk.RLock() defer pk.RUnlock() return pk.pkMap[kid] @@ -103,7 +103,7 @@ var ( ErrTokenExpired = errors.New("token expired") ) -func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error { +func updateClaimsExpiry(dsecs string, claims map[string]any) error { expStr := claims["exp"] if expStr == "" { return ErrTokenExpired @@ -133,7 +133,7 @@ const ( ) // Validate - validates the id_token. -func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims map[string]interface{}) error { +func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims map[string]any) error { jp := new(jwtgo.Parser) jp.ValidMethods = []string{ "RS256", "RS384", "RS512", @@ -143,7 +143,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, "ES3256", "ES3384", "ES3512", } - keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) { + keyFuncCallback := func(jwtToken *jwtgo.Token) (any, error) { kid, ok := jwtToken.Header["kid"].(string) if !ok { return nil, fmt.Errorf("Invalid kid value %v", jwtToken.Header["kid"]) @@ -221,7 +221,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, return nil } -func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]interface{}) error { +func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]any) error { pCfg, ok := r.arnProviderCfgsMap[arn] // If claim user info is enabled, get claims from userInfo // and overwrite them with the claims from JWT. 
diff --git a/internal/config/identity/openid/jwt_test.go b/internal/config/identity/openid/jwt_test.go index 8c1256ab6..cb54faff9 100644 --- a/internal/config/identity/openid/jwt_test.go +++ b/internal/config/identity/openid/jwt_test.go @@ -39,7 +39,7 @@ import ( func TestUpdateClaimsExpiry(t *testing.T) { testCases := []struct { - exp interface{} + exp any dsecs string expectedFailure bool }{ @@ -58,9 +58,8 @@ func TestUpdateClaimsExpiry(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { - claims := map[string]interface{}{} + claims := map[string]any{} claims["exp"] = testCase.exp err := updateClaimsExpiry(testCase.dsecs, claims) if err != nil && !testCase.expectedFailure { @@ -99,7 +98,7 @@ func TestJWTHMACType(t *testing.T) { ExpiresAt: 253428928061, Audience: "76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", }, - Header: map[string]interface{}{ + Header: map[string]any{ "typ": "JWT", "alg": jwtgo.SigningMethodHS256.Alg(), "kid": "76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", @@ -119,7 +118,7 @@ func TestJWTHMACType(t *testing.T) { pubKeys := publicKeys{ RWMutex: &sync.RWMutex{}, - pkMap: map[string]interface{}{}, + pkMap: map[string]any{}, } pubKeys.add("76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", []byte("WNGvKVyyNmXq0TraSvjaDN9CtpFgx35IXtGEffMCPR0")) @@ -165,7 +164,7 @@ func TestJWT(t *testing.T) { pubKeys := publicKeys{ RWMutex: &sync.RWMutex{}, - pkMap: map[string]interface{}{}, + pkMap: map[string]any{}, } err := pubKeys.parseAndAdd(bytes.NewBuffer([]byte(jsonkey))) if err != nil { diff --git a/internal/config/identity/openid/openid.go b/internal/config/identity/openid/openid.go index c90c60d3e..003ede923 100644 --- a/internal/config/identity/openid/openid.go +++ b/internal/config/identity/openid/openid.go @@ -22,7 +22,9 @@ import ( "encoding/base64" "errors" "io" + "maps" "net/http" + "slices" "sort" "strconv" "strings" @@ -186,15 +188,9 @@ func (r *Config) Clone() Config { transport: r.transport, closeRespFn: r.closeRespFn, } - for k, v := range r.arnProviderCfgsMap { - cfg.arnProviderCfgsMap[k] = v - } - for k, v := range r.ProviderCfgs { - cfg.ProviderCfgs[k] = v - } - for k, v := range r.roleArnPolicyMap { - cfg.roleArnPolicyMap[k] = v - } + maps.Copy(cfg.arnProviderCfgsMap, r.arnProviderCfgsMap) + maps.Copy(cfg.ProviderCfgs, r.ProviderCfgs) + maps.Copy(cfg.roleArnPolicyMap, r.roleArnPolicyMap) return cfg } @@ -210,7 +206,7 @@ func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func ProviderCfgs: map[string]*providerCfg{}, pubKeys: publicKeys{ RWMutex: &sync.RWMutex{}, - pkMap: map[string]interface{}{}, + pkMap: map[string]any{}, }, roleArnPolicyMap: map[arn.ARN]string{}, transport: openIDClientTransport, @@ -308,7 +304,7 @@ func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func if scopeList := getCfgVal(Scopes); scopeList != "" { var scopes []string - for _, scope := range strings.Split(scopeList, ",") { + for scope := range strings.SplitSeq(scopeList, ",") { scope = strings.TrimSpace(scope) if scope == "" { return c, config.Errorf("empty scope value is not allowed '%s', please refer to our documentation", scopeList) @@ -414,13 +410,7 @@ func (r *Config) GetConfigInfo(s config.Config, cfgName string) ([]madmin.IDPCfg return nil, err } - present := false - for _, cfg := range openIDConfigs { - if cfg == cfgName { - present = true - break - } - } + present := slices.Contains(openIDConfigs, cfgName) if !present { return nil, ErrProviderConfigNotFound diff --git 
a/internal/config/identity/openid/providercfg.go b/internal/config/identity/openid/providercfg.go index 0c0a91534..1ccc230cc 100644 --- a/internal/config/identity/openid/providercfg.go +++ b/internal/config/identity/openid/providercfg.go @@ -113,7 +113,7 @@ func (p *providerCfg) GetRoleArn() string { // claims as part of the normal oauth2 flow, instead rely // on service providers making calls to IDP to fetch additional // claims available from the UserInfo endpoint -func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]interface{}, error) { +func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]any, error) { if p.JWKS.URL == nil || p.JWKS.URL.String() == "" { return nil, errors.New("openid not configured") } @@ -147,7 +147,7 @@ func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transpor return nil, errors.New(resp.Status) } - claims := map[string]interface{}{} + claims := map[string]any{} if err = json.NewDecoder(resp.Body).Decode(&claims); err != nil { // uncomment this for debugging when needed. // reqBytes, _ := httputil.DumpRequest(req, false) diff --git a/internal/config/identity/plugin/config.go b/internal/config/identity/plugin/config.go index 8714b3bc8..3b0ac830f 100644 --- a/internal/config/identity/plugin/config.go +++ b/internal/config/identity/plugin/config.go @@ -333,9 +333,9 @@ func New(shutdownCtx context.Context, args Args) *AuthNPlugin { // AuthNSuccessResponse - represents the response from the authentication plugin // service. type AuthNSuccessResponse struct { - User string `json:"user"` - MaxValiditySeconds int `json:"maxValiditySeconds"` - Claims map[string]interface{} `json:"claims"` + User string `json:"user"` + MaxValiditySeconds int `json:"maxValiditySeconds"` + Claims map[string]any `json:"claims"` } // AuthNErrorResponse - represents an error response from the authN plugin. diff --git a/internal/config/lambda/event/targetidset.go b/internal/config/lambda/event/targetidset.go index e77affff9..eea481026 100644 --- a/internal/config/lambda/event/targetidset.go +++ b/internal/config/lambda/event/targetidset.go @@ -17,6 +17,8 @@ package event +import "maps" + // TargetIDSet - Set representation of TargetIDs. type TargetIDSet map[TargetID]struct{} @@ -28,9 +30,7 @@ func (set TargetIDSet) IsEmpty() bool { // Clone - returns copy of this set. 
func (set TargetIDSet) Clone() TargetIDSet { setCopy := NewTargetIDSet() - for k, v := range set { - setCopy[k] = v - } + maps.Copy(setCopy, set) return setCopy } diff --git a/internal/config/lambda/event/targetlist.go b/internal/config/lambda/event/targetlist.go index 1bf295414..343503180 100644 --- a/internal/config/lambda/event/targetlist.go +++ b/internal/config/lambda/event/targetlist.go @@ -19,6 +19,7 @@ package event import ( "fmt" + "maps" "net/http" "strings" "sync" @@ -151,9 +152,7 @@ func (list *TargetList) TargetMap() map[TargetID]Target { defer list.RUnlock() ntargets := make(map[TargetID]Target, len(list.targets)) - for k, v := range list.targets { - ntargets[k] = v - } + maps.Copy(ntargets, list.targets) return ntargets } diff --git a/internal/config/lambda/parse.go b/internal/config/lambda/parse.go index 156aa3525..eac6a5def 100644 --- a/internal/config/lambda/parse.go +++ b/internal/config/lambda/parse.go @@ -35,7 +35,7 @@ const ( logSubsys = "notify" ) -func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func logOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, logSubsys, err, id, errKind...) } diff --git a/internal/config/notify/parse.go b/internal/config/notify/parse.go index 8005c2ef2..b479d0d4d 100644 --- a/internal/config/notify/parse.go +++ b/internal/config/notify/parse.go @@ -45,7 +45,7 @@ const ( logSubsys = "notify" ) -func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func logOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, logSubsys, err, id, errKind...) } @@ -412,7 +412,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs if len(kafkaBrokers) == 0 { return nil, config.Errorf("kafka 'brokers' cannot be empty") } - for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) { + for s := range strings.SplitSeq(kafkaBrokers, config.ValueSeparator) { var host *xnet.Host host, err = xnet.ParseHost(s) if err != nil { diff --git a/internal/config/policy/opa/config.go b/internal/config/policy/opa/config.go index 2b5d0298f..47185fb87 100644 --- a/internal/config/policy/opa/config.go +++ b/internal/config/policy/opa/config.go @@ -170,7 +170,7 @@ func (o *Opa) IsAllowed(args policy.Args) (bool, error) { } // OPA input - body := make(map[string]interface{}) + body := make(map[string]any) body["input"] = args inputBytes, err := json.Marshal(body) diff --git a/internal/config/policy/plugin/config.go b/internal/config/policy/plugin/config.go index 93177aa87..6e651c5ad 100644 --- a/internal/config/policy/plugin/config.go +++ b/internal/config/policy/plugin/config.go @@ -185,7 +185,7 @@ func (o *AuthZPlugin) IsAllowed(args policy.Args) (bool, error) { } // Access Management Plugin Input - body := make(map[string]interface{}) + body := make(map[string]any) body["input"] = args inputBytes, err := json.Marshal(body) diff --git a/internal/config/storageclass/storage-class.go b/internal/config/storageclass/storage-class.go index 98dea3158..18121141a 100644 --- a/internal/config/storageclass/storage-class.go +++ b/internal/config/storageclass/storage-class.go @@ -147,7 +147,7 @@ func (sc *StorageClass) UnmarshalText(b []byte) error { // MarshalText - marshals storage class string. 
func (sc *StorageClass) MarshalText() ([]byte, error) { if sc.Parity != 0 { - return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil + return fmt.Appendf(nil, "%s:%d", schemePrefix, sc.Parity), nil } return []byte{}, nil } @@ -430,6 +430,6 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) { return cfg, nil } -func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "config", err, id, errKind...) } diff --git a/internal/config/subnet/subnet.go b/internal/config/subnet/subnet.go index 783dcdd04..c4ba01982 100644 --- a/internal/config/subnet/subnet.go +++ b/internal/config/subnet/subnet.go @@ -95,7 +95,7 @@ func (c Config) submitPost(r *http.Request) (string, error) { } // Post submit 'payload' to specified URL -func (c Config) Post(reqURL string, payload interface{}) (string, error) { +func (c Config) Post(reqURL string, payload any) (string, error) { if !c.Registered() { return "", errors.New("Deployment is not registered with SUBNET. Please register the deployment via 'mc license register ALIAS'") } diff --git a/internal/crypto/error.go b/internal/crypto/error.go index 72fb4674c..4d21b936e 100644 --- a/internal/crypto/error.go +++ b/internal/crypto/error.go @@ -32,7 +32,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type crypto.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { e := fmt.Errorf(format, a...) ee := Error{} ee.msg = e.Error() diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go index 04ed6ccbb..7d6506eae 100644 --- a/internal/dsync/drwmutex.go +++ b/internal/dsync/drwmutex.go @@ -21,6 +21,7 @@ import ( "context" "errors" "math/rand" + "slices" "sort" "strconv" "sync" @@ -60,7 +61,7 @@ func init() { ) } -func log(format string, data ...interface{}) { +func log(format string, data ...any) { if dsyncLog { console.Printf(format, data...) } @@ -621,13 +622,7 @@ func (dm *DRWMutex) Unlock(ctx context.Context) { defer dm.m.Unlock() // Check if minimally a single bool is set in the writeLocks array - lockFound := false - for _, uid := range dm.writeLocks { - if isLocked(uid) { - lockFound = true - break - } - } + lockFound := slices.ContainsFunc(dm.writeLocks, isLocked) if !lockFound { panic("Trying to Unlock() while no Lock() is active") } @@ -672,13 +667,7 @@ func (dm *DRWMutex) RUnlock(ctx context.Context) { defer dm.m.Unlock() // Check if minimally a single bool is set in the writeLocks array - lockFound := false - for _, uid := range dm.readLocks { - if isLocked(uid) { - lockFound = true - break - } - } + lockFound := slices.ContainsFunc(dm.readLocks, isLocked) if !lockFound { panic("Trying to RUnlock() while no RLock() is active") } diff --git a/internal/dsync/drwmutex_test.go b/internal/dsync/drwmutex_test.go index ac32908fe..64e844d04 100644 --- a/internal/dsync/drwmutex_test.go +++ b/internal/dsync/drwmutex_test.go @@ -157,18 +157,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) { clocked := make(chan bool) cunlock := make(chan bool) cdone := make(chan bool) - for i := 0; i < numReaders; i++ { + for range numReaders { go parallelReader(context.Background(), m, clocked, cunlock, cdone) } // Wait for all parallel RLock()s to succeed. 
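`slices.ContainsFunc` (Go 1.21) collapses the find-and-break loops removed from `Unlock`/`RUnlock` above, short-circuiting on the first element that satisfies the predicate; plain `slices.Contains` does the same for direct equality, as in the driver-registration tests further down. A simplified sketch with stand-in types, not the real dsync helpers:

    package main

    import (
        "fmt"
        "slices"
    )

    // isLocked is a simplified stand-in for the dsync predicate:
    // a non-empty owner UID means the slot holds a lock.
    func isLocked(uid string) bool { return uid != "" }

    func main() {
        writeLocks := []string{"", "", "uid-3", ""}

        // Replaces: for _, uid := range writeLocks { if isLocked(uid) { found = true; break } }
        lockFound := slices.ContainsFunc(writeLocks, isLocked)
        fmt.Println(lockFound) // true

        // Direct equality needs no predicate at all.
        fmt.Println(slices.Contains([]string{"mysql", "postgres"}, "mysql")) // true
    }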
- for i := 0; i < numReaders; i++ { + for range numReaders { <-clocked } - for i := 0; i < numReaders; i++ { + for range numReaders { cunlock <- true } // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { + for range numReaders { <-cdone } } @@ -184,13 +184,13 @@ func TestParallelReaders(t *testing.T) { // Borrowed from rwmutex_test.go func reader(resource string, numIterations int, activity *int32, cdone chan bool) { rwm := NewDRWMutex(ds, resource) - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetRLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) { n := atomic.AddInt32(activity, 1) if n < 1 || n >= 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -1) rwm.RUnlock(context.Background()) @@ -202,13 +202,13 @@ func reader(resource string, numIterations int, activity *int32, cdone chan bool // Borrowed from rwmutex_test.go func writer(resource string, numIterations int, activity *int32, cdone chan bool) { rwm := NewDRWMutex(ds, resource) - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) { n := atomic.AddInt32(activity, 10000) if n != 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -10000) rwm.Unlock(context.Background()) diff --git a/internal/dsync/dsync-server_test.go b/internal/dsync/dsync-server_test.go index 0fa48f4d1..bdf7ac7bb 100644 --- a/internal/dsync/dsync-server_test.go +++ b/internal/dsync/dsync-server_test.go @@ -149,13 +149,13 @@ func (lh *lockServerHandler) RLockHandler(w http.ResponseWriter, r *http.Request } func stopLockServers() { - for i := 0; i < numberOfNodes; i++ { + for i := range numberOfNodes { nodes[i].Close() } } func startLockServers() { - for i := 0; i < numberOfNodes; i++ { + for i := range numberOfNodes { lsrv := &lockServer{ mutex: sync.Mutex{}, lockMap: make(map[string]int64), diff --git a/internal/dsync/dsync_test.go b/internal/dsync/dsync_test.go index 6999b6182..18b6c0ba8 100644 --- a/internal/dsync/dsync_test.go +++ b/internal/dsync/dsync_test.go @@ -42,7 +42,7 @@ func TestMain(m *testing.M) { // Initialize locker clients for dsync. var clnts []NetLocker - for i := 0; i < len(nodes); i++ { + for i := range nodes { clnts = append(clnts, newClient(nodes[i].URL)) } @@ -310,7 +310,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) { // Borrowed from mutex_test.go func HammerMutex(m *DRWMutex, loops int, cdone chan bool) { - for i := 0; i < loops; i++ { + for range loops { m.Lock(id, source) m.Unlock(context.Background()) } @@ -325,10 +325,10 @@ func TestMutex(t *testing.T) { } c := make(chan bool) m := NewDRWMutex(ds, "test") - for i := 0; i < 10; i++ { + for range 10 { go HammerMutex(m, loops, c) } - for i := 0; i < 10; i++ { + for range 10 { <-c } } @@ -363,7 +363,7 @@ func benchmarkMutex(b *testing.B, slack, work bool) { mu.Lock(id, source) mu.Unlock(b.Context()) if work { - for i := 0; i < 100; i++ { + for range 100 { foo *= 2 foo /= 2 } diff --git a/internal/event/config.go b/internal/event/config.go index dc3990184..de21764bf 100644 --- a/internal/event/config.go +++ b/internal/event/config.go @@ -30,7 +30,7 @@ import ( // ValidateFilterRuleValue - checks if given value is filter rule value or not. 
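The test-loop rewrites rely on Go 1.22 range-over-int: `range n` for a non-negative integer n iterates exactly n times, yielding 0..n-1 when an index is named, and the variable can be dropped entirely when unused. A standalone sketch:

    package main

    import "fmt"

    func main() {
        // Replaces: for i := 0; i < 3; i++ { ... }
        for i := range 3 {
            fmt.Println("iteration", i) // 0, 1, 2
        }

        // With no index variable the loop simply runs n times.
        count := 0
        for range 3 {
            count++
        }
        fmt.Println(count) // 3
    }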
func ValidateFilterRuleValue(value string) error { - for _, segment := range strings.Split(value, "/") { + for segment := range strings.SplitSeq(value, "/") { if segment == "." || segment == ".." { return &ErrInvalidFilterValue{value} } @@ -139,7 +139,7 @@ func (ruleList FilterRuleList) Pattern() string { // S3Key - represents elements inside <S3Key>...</S3Key> type S3Key struct { - RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"` + RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key"` } // MarshalXML implements a custom marshaller to support `omitempty` feature. diff --git a/internal/event/target/elasticsearch.go b/internal/event/target/elasticsearch.go index 5a4d61e41..5600b750a 100644 --- a/internal/event/target/elasticsearch.go +++ b/internal/event/target/elasticsearch.go @@ -427,13 +427,13 @@ func (c *esClientV7) getServerSupportStatus(ctx context.Context) (ESSupportStatu defer resp.Body.Close() - m := make(map[string]interface{}) + m := make(map[string]any) err = json.NewDecoder(resp.Body).Decode(&m) if err != nil { return ESSUnknown, "", fmt.Errorf("unable to get ES Server version - json parse error: %v", err) } - if v, ok := m["version"].(map[string]interface{}); ok { + if v, ok := m["version"].(map[string]any); ok { if ver, ok := v["number"].(string); ok { status, err := getESVersionSupportStatus(ver) return status, ver, err @@ -454,16 +454,16 @@ func (c *esClientV7) createIndex(args ElasticsearchArgs) error { } defer res.Body.Close() - var v map[string]interface{} + var v map[string]any found := false if err := json.NewDecoder(res.Body).Decode(&v); err != nil { return fmt.Errorf("Error parsing response body: %v", err) } - indices, ok := v["indices"].([]interface{}) + indices, ok := v["indices"].([]any) if ok { for _, index := range indices { - if name, ok := index.(map[string]interface{}); ok && name["name"] == args.Index { + if name, ok := index.(map[string]any); ok && name["name"] == args.Index { found = true break } @@ -529,7 +529,7 @@ func (c *esClientV7) removeEntry(ctx context.Context, index string, key string) } func (c *esClientV7) updateEntry(ctx context.Context, index string, key string, eventData event.Event) error { - doc := map[string]interface{}{ + doc := map[string]any{ "Records": []event.Event{eventData}, } var buf bytes.Buffer @@ -556,7 +556,7 @@ func (c *esClientV7) updateEntry(ctx context.Context, index string, key string, } func (c *esClientV7) addEntry(ctx context.Context, index string, eventData event.Event) error { - doc := map[string]interface{}{ + doc := map[string]any{ "Records": []event.Event{eventData}, } var buf bytes.Buffer diff --git a/internal/event/target/mysql_test.go b/internal/event/target/mysql_test.go index 9a59b28d5..d8799dc73 100644 --- a/internal/event/target/mysql_test.go +++ b/internal/event/target/mysql_test.go @@ -19,6 +19,7 @@ package target import ( "database/sql" + "slices" "testing" ) @@ -26,11 +27,8 @@ import ( // is registered and fails otherwise.
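`strings.SplitSeq` (Go 1.24) yields the same substrings as `strings.Split` but as an iterator, avoiding the intermediate slice allocation, which is why the range loops above need no other changes. A standalone sketch:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        value := "a/./b"

        // Replaces: for _, segment := range strings.Split(value, "/") { ... }
        // No []string is built; segments are produced one at a time.
        for segment := range strings.SplitSeq(value, "/") {
            if segment == "." || segment == ".." {
                fmt.Println("invalid segment:", segment)
            }
        }
    }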
func TestMySQLRegistration(t *testing.T) { var found bool - for _, drv := range sql.Drivers() { - if drv == "mysql" { - found = true - break - } + if slices.Contains(sql.Drivers(), "mysql") { + found = true } if !found { t.Fatal("mysql driver not registered") diff --git a/internal/event/target/postgresql_test.go b/internal/event/target/postgresql_test.go index cd03c7134..9b5130e2e 100644 --- a/internal/event/target/postgresql_test.go +++ b/internal/event/target/postgresql_test.go @@ -19,6 +19,7 @@ package target import ( "database/sql" + "slices" "testing" ) @@ -26,11 +27,8 @@ import ( // is registered and fails otherwise. func TestPostgreSQLRegistration(t *testing.T) { var found bool - for _, drv := range sql.Drivers() { - if drv == "postgres" { - found = true - break - } + if slices.Contains(sql.Drivers(), "postgres") { + found = true } if !found { t.Fatal("postgres driver not registered") diff --git a/internal/event/targetidset.go b/internal/event/targetidset.go index eb6e0687e..88fa91770 100644 --- a/internal/event/targetidset.go +++ b/internal/event/targetidset.go @@ -17,15 +17,15 @@ package event +import "maps" + // TargetIDSet - Set representation of TargetIDs. type TargetIDSet map[TargetID]struct{} // Clone - returns copy of this set. func (set TargetIDSet) Clone() TargetIDSet { setCopy := NewTargetIDSet() - for k, v := range set { - setCopy[k] = v - } + maps.Copy(setCopy, set) return setCopy } diff --git a/internal/event/targetlist.go b/internal/event/targetlist.go index 28eff2b18..3aeee5d26 100644 --- a/internal/event/targetlist.go +++ b/internal/event/targetlist.go @@ -20,6 +20,7 @@ package event import ( "context" "fmt" + "maps" "runtime" "sync" "sync/atomic" @@ -252,9 +253,7 @@ func (list *TargetList) TargetMap() map[TargetID]Target { defer list.RUnlock() ntargets := make(map[TargetID]Target, len(list.targets)) - for k, v := range list.targets { - ntargets[k] = v - } + maps.Copy(ntargets, list.targets) return ntargets } diff --git a/internal/grid/benchmark_test.go b/internal/grid/benchmark_test.go index e8c38343a..9bd17d1a9 100644 --- a/internal/grid/benchmark_test.go +++ b/internal/grid/benchmark_test.go @@ -231,7 +231,7 @@ func benchmarkGridStreamRespOnly(b *testing.B, n int) { errFatal(remote.RegisterStreamingHandler(handlerTest, StreamHandler{ // Send 10x response. Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, out chan<- []byte) *RemoteErr { - for i := 0; i < responses; i++ { + for i := range responses { toSend := GetByteBuffer()[:0] toSend = append(toSend, byte(i)) toSend = append(toSend, payload...) @@ -407,7 +407,7 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) { b.Fatal(err.Error()) } got := 0 - for i := 0; i < requests; i++ { + for range requests { got++ st.Requests <- append(GetByteBuffer()[:0], payload...) } @@ -525,7 +525,7 @@ func benchmarkGridStreamTwoway(b *testing.B, n int) { got := 0 sent := 0 go func() { - for i := 0; i < messages; i++ { + for range messages { st.Requests <- append(GetByteBuffer()[:0], payload...) if sent++; sent == messages { close(st.Requests) diff --git a/internal/grid/connection.go b/internal/grid/connection.go index 522568278..576f4229a 100644 --- a/internal/grid/connection.go +++ b/internal/grid/connection.go @@ -47,7 +47,7 @@ import ( "github.com/zeebo/xxh3" ) -func gridLogIf(ctx context.Context, err error, errKind ...interface{}) { +func gridLogIf(ctx context.Context, err error, errKind ...any) { logger.LogIf(ctx, "grid", err, errKind...) 
} @@ -55,7 +55,7 @@ func gridLogIfNot(ctx context.Context, err error, ignored ...error) { logger.LogIfNot(ctx, "grid", err, ignored...) } -func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { logger.LogOnceIf(ctx, "grid", err, id, errKind...) } @@ -659,10 +659,7 @@ func (c *Connection) connect() { } sleep := defaultDialTimeout + time.Duration(rng.Int63n(int64(defaultDialTimeout))) next := dialStarted.Add(sleep / 2) - sleep = time.Until(next).Round(time.Millisecond) - if sleep < 0 { - sleep = 0 - } + sleep = max(time.Until(next).Round(time.Millisecond), 0) gotState := c.State() if gotState == StateShutdown { return diff --git a/internal/grid/grid_test.go b/internal/grid/grid_test.go index 62c71b33d..3d1156e68 100644 --- a/internal/grid/grid_test.go +++ b/internal/grid/grid_test.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "maps" "os" "runtime" "strconv" @@ -266,9 +267,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) { // Handles incoming requests, returns a response handler1 := func(req *MSS) (resp *MSS, err *RemoteErr) { resp = h1.NewResponse() - for k, v := range *req { - (*resp)[k] = v - } + maps.Copy((*resp), *req) return resp, nil } // Return error @@ -708,7 +707,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) { Handle: func(ctx context.Context, payload []byte, request <-chan []byte, resp chan<- []byte) *RemoteErr { // Send many responses. // Test that this doesn't block. - for i := byte(0); i < 100; i++ { + for i := range byte(100) { select { case resp <- []byte{i}: // ok @@ -744,7 +743,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) { <-serverSent // Now do 100 other requests to ensure that the server doesn't block. - for i := 0; i < 100; i++ { + for range 100 { _, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload)) errFatal(err) } @@ -820,13 +819,13 @@ func testServerInCongestion(t *testing.T, local, remote *Manager) { // Start sending requests. go func() { - for i := byte(0); i < 100; i++ { + for i := range byte(100) { st.Requests <- []byte{i} } close(st.Requests) }() // Now do 100 other requests to ensure that the server doesn't block. - for i := 0; i < 100; i++ { + for range 100 { _, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload)) errFatal(err) } @@ -897,7 +896,7 @@ func testGenericsStreamRoundtrip(t *testing.T, local, remote *Manager) { errFatal(err) go func() { defer close(stream.Requests) - for i := 0; i < payloads; i++ { + for i := range payloads { // t.Log("sending new client request") stream.Requests <- &testRequest{Num: i, String: testPayload} } @@ -974,7 +973,7 @@ func testGenericsStreamRoundtripSubroute(t *testing.T, local, remote *Manager) { errFatal(err) go func() { defer close(stream.Requests) - for i := 0; i < payloads; i++ { + for i := range payloads { // t.Log("sending new client request") stream.Requests <- &testRequest{Num: i, String: testPayload} } @@ -1019,7 +1018,7 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) { Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, resp chan<- []byte) *RemoteErr { // Send many responses. // Test that this doesn't block. 
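The sleep clamp above leans on the Go 1.21 `min`/`max` builtins, which pick the smaller or larger of their arguments and replace the classic compare-and-reassign idiom. A standalone sketch of both directions, with invented values:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        next := time.Now().Add(-time.Second) // deadline already in the past

        // Replaces: sleep := time.Until(next).Round(time.Millisecond); if sleep < 0 { sleep = 0 }
        sleep := max(time.Until(next).Round(time.Millisecond), 0)

        // Replaces: if n > limit { n = limit }
        n := min(4096, 1024)

        fmt.Println(sleep, n) // 0s 1024
    }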
- for i := byte(0); i < 100; i++ { + for i := range byte(100) { select { case resp <- []byte{i}: // ok diff --git a/internal/grid/types.go b/internal/grid/types.go index 41ea2d61d..d0ad7d340 100644 --- a/internal/grid/types.go +++ b/internal/grid/types.go @@ -411,7 +411,7 @@ func NewJSONPool[T any]() *JSONPool[T] { } return &JSONPool[T]{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { var t T return &t }, @@ -700,7 +700,7 @@ func (j *Array[T]) UnmarshalMsg(bytes []byte) ([]byte, error) { } else { j.val = j.val[:0] } - for i := uint32(0); i < l; i++ { + for range l { v := j.p.newE() bytes, err = v.UnmarshalMsg(bytes) if err != nil { diff --git a/internal/grid/types_test.go b/internal/grid/types_test.go index 43899de28..f60fecd17 100644 --- a/internal/grid/types_test.go +++ b/internal/grid/types_test.go @@ -81,8 +81,8 @@ func TestMarshalUnmarshalMSSNil(t *testing.T) { func BenchmarkMarshalMsgMSS(b *testing.B) { v := MSS{"abc": "def", "ghi": "jkl"} b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { v.MarshalMsg(nil) } } @@ -93,8 +93,8 @@ func BenchmarkAppendMsgMSS(b *testing.B) { bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { bts, _ = v.MarshalMsg(bts[0:0]) } } @@ -104,8 +104,8 @@ func BenchmarkUnmarshalMSS(b *testing.B) { bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { _, err := v.UnmarshalMsg(bts) if err != nil { b.Fatal(err) diff --git a/internal/http/listener_test.go b/internal/http/listener_test.go index 2f55f5815..b1ffdb7a0 100644 --- a/internal/http/listener_test.go +++ b/internal/http/listener_test.go @@ -227,7 +227,7 @@ func TestHTTPListenerAddr(t *testing.T) { nonLoopBackIP := getNonLoopBackIP(t) var casePorts []string - for i := 0; i < 6; i++ { + for range 6 { casePorts = append(casePorts, getNextPort()) } @@ -278,7 +278,7 @@ func TestHTTPListenerAddrs(t *testing.T) { nonLoopBackIP := getNonLoopBackIP(t) var casePorts []string - for i := 0; i < 6; i++ { + for range 6 { casePorts = append(casePorts, getNextPort()) } diff --git a/internal/jwt/parser.go b/internal/jwt/parser.go index 831d19cf0..a8e2fd0b0 100644 --- a/internal/jwt/parser.go +++ b/internal/jwt/parser.go @@ -246,7 +246,7 @@ func NewMapClaims() *MapClaims { } // Set Adds new arbitrary claim keys and values. -func (c *MapClaims) Set(key string, val interface{}) { +func (c *MapClaims) Set(key string, val any) { if c == nil { return } @@ -266,7 +266,7 @@ func (c *MapClaims) Lookup(key string) (value string, ok bool) { if c == nil { return "", false } - var vinterface interface{} + var vinterface any vinterface, ok = c.MapClaims[key] if ok { value, ok = vinterface.(string) @@ -302,7 +302,7 @@ func (c *MapClaims) Valid() error { } // Map returns underlying low-level map claims. 
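The benchmark changes switch to `for b.Loop()` (Go 1.24): the first call resets the timer, so the explicit `b.ResetTimer()` goes away, and the loop body is kept live against dead-code elimination. A standalone sketch; the package and benchmark are hypothetical, not from this patch:

    package bench_test

    import (
        "strings"
        "testing"
    )

    func BenchmarkJoin(b *testing.B) {
        parts := []string{"a", "b", "c"}
        b.ReportAllocs()

        // Replaces: b.ResetTimer(); for i := 0; i < b.N; i++ { ... }
        // b.Loop resets the timer on its first call and keeps the body's
        // results live, so the work is not optimized away.
        for b.Loop() {
            _ = strings.Join(parts, ",")
        }
    }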
-func (c *MapClaims) Map() map[string]interface{} { +func (c *MapClaims) Map() map[string]any { if c == nil { return nil } diff --git a/internal/jwt/parser_test.go b/internal/jwt/parser_test.go index 9fc6889e9..a521bfcfc 100644 --- a/internal/jwt/parser_test.go +++ b/internal/jwt/parser_test.go @@ -176,7 +176,6 @@ func standardClaimsToken(claims *StandardClaims) string { func TestParserParse(t *testing.T) { // Iterate over test data set and run tests for _, data := range jwtTestData { - data := data t.Run(data.name, func(t *testing.T) { // Parse the token var err error diff --git a/internal/kms/config.go b/internal/kms/config.go index 7de651704..158bd0d60 100644 --- a/internal/kms/config.go +++ b/internal/kms/config.go @@ -419,7 +419,7 @@ func IsPresent() (bool, error) { func expandEndpoints(s string) ([]string, error) { var endpoints []string - for _, endpoint := range strings.Split(s, ",") { + for endpoint := range strings.SplitSeq(s, ",") { endpoint = strings.TrimSpace(endpoint) if endpoint == "" { continue diff --git a/internal/logger/audit.go b/internal/logger/audit.go index dc71edf06..62af94cc5 100644 --- a/internal/logger/audit.go +++ b/internal/logger/audit.go @@ -60,7 +60,7 @@ func GetAuditEntry(ctx context.Context) *audit.Entry { } // AuditLog - logs audit logs to all audit targets. -func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, filterKeys ...string) { +func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]any, filterKeys ...string) { auditTgts := AuditTargets() if len(auditTgts) == 0 { return @@ -124,7 +124,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl entry.API.TimeToResponse = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) + "ns" entry.API.TimeToResponseInNS = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) // We hold the lock, so we cannot call reqInfo.GetTagsMap(). - tags := make(map[string]interface{}, len(reqInfo.tags)) + tags := make(map[string]any, len(reqInfo.tags)) for _, t := range reqInfo.tags { tags[t.Key] = t.Val } diff --git a/internal/logger/config.go b/internal/logger/config.go index 592bd4f7c..5cc71d0d9 100644 --- a/internal/logger/config.go +++ b/internal/logger/config.go @@ -389,7 +389,7 @@ func lookupAuditKafkaConfig(scfg config.Config, cfg Config) (Config, error) { if len(kafkaBrokers) == 0 { return cfg, config.Errorf("kafka 'brokers' cannot be empty") } - for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) { + for s := range strings.SplitSeq(kafkaBrokers, config.ValueSeparator) { var host *xnet.Host host, err = xnet.ParseHost(s) if err != nil { diff --git a/internal/logger/console.go b/internal/logger/console.go index f87c4e314..d2a022673 100644 --- a/internal/logger/console.go +++ b/internal/logger/console.go @@ -36,12 +36,12 @@ var ExitFunc = os.Exit // Logger interface describes the methods that need to be implemented to satisfy the interface requirements. 
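The deleted `data := data` in parser_test.go above was the pre-Go 1.22 idiom for capturing a loop variable; since Go 1.22 each iteration declares a fresh variable, so closures and subtests no longer observe only the final value. A standalone sketch of the behavior:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for _, name := range []string{"a", "b", "c"} {
            // Before Go 1.22 this required `name := name` here; now each
            // iteration gets its own `name`, so every closure captures its own.
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(name) // prints a, b, c in some order
            }()
        }
        wg.Wait()
    }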
type Logger interface { - json(msg string, args ...interface{}) - quiet(msg string, args ...interface{}) - pretty(msg string, args ...interface{}) + json(msg string, args ...any) + quiet(msg string, args ...any) + pretty(msg string, args ...any) } -func consoleLog(console Logger, msg string, args ...interface{}) { +func consoleLog(console Logger, msg string, args ...any) { switch { case jsonFlag: // Strip escape control characters from json message @@ -64,11 +64,11 @@ func consoleLog(console Logger, msg string, args ...interface{}) { // Fatal prints only fatal error message with no stack trace // it will be called for input validation failures -func Fatal(err error, msg string, data ...interface{}) { +func Fatal(err error, msg string, data ...any) { fatal(err, msg, data...) } -func fatal(err error, msg string, data ...interface{}) { +func fatal(err error, msg string, data ...any) { if msg == "" { if len(data) > 0 { msg = fmt.Sprint(data...) @@ -85,7 +85,7 @@ var fatalMessage fatalMsg type fatalMsg struct{} -func (f fatalMsg) json(msg string, args ...interface{}) { +func (f fatalMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -105,7 +105,7 @@ func (f fatalMsg) json(msg string, args ...interface{}) { ExitFunc(1) } -func (f fatalMsg) quiet(msg string, args ...interface{}) { +func (f fatalMsg) quiet(msg string, args ...any) { f.pretty(msg, args...) } @@ -116,7 +116,7 @@ var ( bannerWidth = len(logTag) + 1 ) -func (f fatalMsg) pretty(msg string, args ...interface{}) { +func (f fatalMsg) pretty(msg string, args ...any) { // Build the passed error message errMsg := fmt.Sprintf(msg, args...) @@ -128,30 +128,27 @@ func (f fatalMsg) pretty(msg string, args ...interface{}) { // message itself contains some colored text, we needed // to use some ANSI control escapes to cursor color state // and freely move in the screen. - for _, line := range strings.Split(errMsg, "\n") { + for line := range strings.SplitSeq(errMsg, "\n") { if len(line) == 0 { // No more text to print, just quit. break } - for { - // Save the attributes of the current cursor helps - // us save the text color of the passed error message - ansiSaveAttributes() - // Print banner with or without the log tag - if !tagPrinted { - fmt.Fprint(Output, logBanner) - tagPrinted = true - } else { - fmt.Fprint(Output, emptyBanner) - } - // Restore the text color of the error message - ansiRestoreAttributes() - ansiMoveRight(bannerWidth) - // Continue error message printing - fmt.Fprintln(Output, line) - break + // Save the attributes of the current cursor helps + // us save the text color of the passed error message + ansiSaveAttributes() + // Print banner with or without the log tag + if !tagPrinted { + fmt.Fprint(Output, logBanner) + tagPrinted = true + } else { + fmt.Fprint(Output, emptyBanner) } + // Restore the text color of the error message + ansiRestoreAttributes() + ansiMoveRight(bannerWidth) + // Continue error message printing + fmt.Fprintln(Output, line) } // Exit because this is a fatal error message @@ -162,7 +159,7 @@ type infoMsg struct{} var info infoMsg -func (i infoMsg) json(msg string, args ...interface{}) { +func (i infoMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) 
@@ -180,10 +177,10 @@ func (i infoMsg) json(msg string, args ...interface{}) { fmt.Fprintln(Output, string(logJSON)) } -func (i infoMsg) quiet(msg string, args ...interface{}) { +func (i infoMsg) quiet(msg string, args ...any) { } -func (i infoMsg) pretty(msg string, args ...interface{}) { +func (i infoMsg) pretty(msg string, args ...any) { if msg == "" { fmt.Fprintln(Output, args...) } else { @@ -195,7 +192,7 @@ type errorMsg struct{} var errorMessage errorMsg -func (i errorMsg) json(msg string, args ...interface{}) { +func (i errorMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -214,11 +211,11 @@ func (i errorMsg) json(msg string, args ...interface{}) { fmt.Fprintln(Output, string(logJSON)) } -func (i errorMsg) quiet(msg string, args ...interface{}) { +func (i errorMsg) quiet(msg string, args ...any) { i.pretty(msg, args...) } -func (i errorMsg) pretty(msg string, args ...interface{}) { +func (i errorMsg) pretty(msg string, args ...any) { if msg == "" { fmt.Fprintln(Output, args...) } else { @@ -227,7 +224,7 @@ func (i errorMsg) pretty(msg string, args ...interface{}) { } // Error : -func Error(msg string, data ...interface{}) { +func Error(msg string, data ...any) { if DisableLog { return } @@ -235,7 +232,7 @@ func Error(msg string, data ...interface{}) { } // Info : -func Info(msg string, data ...interface{}) { +func Info(msg string, data ...any) { if DisableLog { return } @@ -243,7 +240,7 @@ func Info(msg string, data ...interface{}) { } // Startup : -func Startup(msg string, data ...interface{}) { +func Startup(msg string, data ...any) { if DisableLog { return } @@ -254,7 +251,7 @@ type startupMsg struct{} var startup startupMsg -func (i startupMsg) json(msg string, args ...interface{}) { +func (i startupMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -272,10 +269,10 @@ func (i startupMsg) json(msg string, args ...interface{}) { fmt.Fprintln(Output, string(logJSON)) } -func (i startupMsg) quiet(msg string, args ...interface{}) { +func (i startupMsg) quiet(msg string, args ...any) { } -func (i startupMsg) pretty(msg string, args ...interface{}) { +func (i startupMsg) pretty(msg string, args ...any) { if msg == "" { fmt.Fprintln(Output, args...) } else { @@ -287,7 +284,7 @@ type warningMsg struct{} var warningMessage warningMsg -func (i warningMsg) json(msg string, args ...interface{}) { +func (i warningMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -306,11 +303,11 @@ func (i warningMsg) json(msg string, args ...interface{}) { fmt.Fprintln(Output, string(logJSON)) } -func (i warningMsg) quiet(msg string, args ...interface{}) { +func (i warningMsg) quiet(msg string, args ...any) { i.pretty(msg, args...) } -func (i warningMsg) pretty(msg string, args ...interface{}) { +func (i warningMsg) pretty(msg string, args ...any) { if msg == "" { fmt.Fprintln(Output, args...) } else { @@ -319,7 +316,7 @@ func (i warningMsg) pretty(msg string, args ...interface{}) { } // Warning : -func Warning(msg string, data ...interface{}) { +func Warning(msg string, data ...any) { if DisableLog { return } diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 978f32332..09573ab49 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -254,7 +254,7 @@ func HashString(input string) string { // LogAlwaysIf prints a detailed error message during // the execution of the server. 
-func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { +func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...any) { if err == nil { return } @@ -264,7 +264,7 @@ func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...in // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. -func LogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { +func LogIf(ctx context.Context, subsystem string, err error, errKind ...any) { if logIgnoreError(err) { return } @@ -285,7 +285,7 @@ func LogIfNot(ctx context.Context, subsystem string, err error, ignored ...error logIf(ctx, subsystem, err) } -func errToEntry(ctx context.Context, subsystem string, err error, errKind ...interface{}) log.Entry { +func errToEntry(ctx context.Context, subsystem string, err error, errKind ...any) log.Entry { var l string if anonFlag { l = reflect.TypeOf(err).String() @@ -295,11 +295,11 @@ func errToEntry(ctx context.Context, subsystem string, err error, errKind ...int return buildLogEntry(ctx, subsystem, l, getTrace(3), errKind...) } -func logToEntry(ctx context.Context, subsystem, message string, errKind ...interface{}) log.Entry { +func logToEntry(ctx context.Context, subsystem, message string, errKind ...any) log.Entry { return buildLogEntry(ctx, subsystem, message, nil, errKind...) } -func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...interface{}) log.Entry { +func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...any) log.Entry { logKind := madmin.LogKindError if len(errKind) > 0 { if ek, ok := errKind[0].(madmin.LogKind); ok { @@ -326,7 +326,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin } // Copy tags. We hold read lock already. - tags := make(map[string]interface{}, len(req.tags)) + tags := make(map[string]any, len(req.tags)) for _, entry := range req.tags { tags[entry.Key] = entry.Val } @@ -379,7 +379,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin entry.API.Args.Object = HashString(entry.API.Args.Object) entry.RemoteHost = HashString(entry.RemoteHost) if entry.Trace != nil { - entry.Trace.Variables = make(map[string]interface{}) + entry.Trace.Variables = make(map[string]any) } } @@ -388,7 +388,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin // consoleLogIf prints a detailed error message during // the execution of the server. -func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { +func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...any) { if DisableLog { return } @@ -403,7 +403,7 @@ func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...i // logIf prints a detailed error message during // the execution of the server. 
-func logIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) { +func logIf(ctx context.Context, subsystem string, err error, errKind ...any) { if DisableLog { return } @@ -431,7 +431,7 @@ func sendLog(ctx context.Context, entry log.Entry) { } // Event sends a event log to log targets -func Event(ctx context.Context, subsystem, msg string, args ...interface{}) { +func Event(ctx context.Context, subsystem, msg string, args ...any) { if DisableLog { return } @@ -444,7 +444,7 @@ var ErrCritical struct{} // CriticalIf logs the provided error on the console. It fails the // current go-routine by causing a `panic(ErrCritical)`. -func CriticalIf(ctx context.Context, err error, errKind ...interface{}) { +func CriticalIf(ctx context.Context, err error, errKind ...any) { if err != nil { LogIf(ctx, "", err, errKind...) panic(ErrCritical) @@ -452,7 +452,7 @@ func CriticalIf(ctx context.Context, err error, errKind ...interface{}) { } // FatalIf is similar to Fatal() but it ignores passed nil error -func FatalIf(err error, msg string, data ...interface{}) { +func FatalIf(err error, msg string, data ...any) { if err == nil { return } diff --git a/internal/logger/logonce.go b/internal/logger/logonce.go index 12b220d9f..319e57b9a 100644 --- a/internal/logger/logonce.go +++ b/internal/logger/logonce.go @@ -25,7 +25,7 @@ import ( ) // LogOnce provides the function type for logger.LogOnceIf() function -type LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) +type LogOnce func(ctx context.Context, err error, id string, errKind ...any) type onceErr struct { Err error @@ -38,7 +38,7 @@ type logOnceType struct { sync.Mutex } -func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { +func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if err == nil { return } @@ -92,7 +92,7 @@ func unwrapErrs(err error) (leafErr error) { } // One log message per error. -func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { +func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if err == nil { return } @@ -142,7 +142,7 @@ var logOnce = newLogOnceType() // LogOnceIf - Logs notification errors - once per error. // id is a unique identifier for related log messages, refer to cmd/notification.go // on how it is used. -func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { +func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if logIgnoreError(err) { return } @@ -150,7 +150,7 @@ func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errK } // LogOnceConsoleIf - similar to LogOnceIf but exclusively only logs to console target. 
-func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) { +func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if logIgnoreError(err) { return } diff --git a/internal/logger/message/audit/entry.go b/internal/logger/message/audit/entry.go index 600c4f738..4ef635f24 100644 --- a/internal/logger/message/audit/entry.go +++ b/internal/logger/message/audit/entry.go @@ -41,7 +41,7 @@ func NewEntry(deploymentID string) audit.Entry { } // ToEntry - constructs an audit entry from a http request -func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, deploymentID string) audit.Entry { +func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]any, deploymentID string) audit.Entry { entry := NewEntry(deploymentID) entry.RemoteHost = handlers.GetSourceIP(r) diff --git a/internal/logger/target/console/console.go b/internal/logger/target/console/console.go index 9a7535e7e..a3b6dce00 100644 --- a/internal/logger/target/console/console.go +++ b/internal/logger/target/console/console.go @@ -50,7 +50,7 @@ func (c *Target) String() string { } // Send log message 'e' to console -func (c *Target) Send(e interface{}) error { +func (c *Target) Send(e any) error { entry, ok := e.(log.Entry) if !ok { return fmt.Errorf("Uexpected log entry structure %#v", e) diff --git a/internal/logger/target/http/http.go b/internal/logger/target/http/http.go index a877cf162..0b0277845 100644 --- a/internal/logger/target/http/http.go +++ b/internal/logger/target/http/http.go @@ -61,7 +61,7 @@ const ( ) var ( - logChBuffers = make(map[string]chan interface{}) + logChBuffers = make(map[string]chan any) logChLock = sync.Mutex{} ) @@ -84,7 +84,7 @@ type Config struct { HTTPTimeout time.Duration `json:"httpTimeout"` // Custom logger - LogOnceIf func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"` + LogOnceIf func(ctx context.Context, err error, id string, errKind ...any) `json:"-"` } // Target implements logger.Target and sends the json @@ -109,7 +109,7 @@ type Target struct { // Channel of log entries. // Reading logCh must hold read lock on logChMu (to avoid read race) // Sending a value on logCh must hold read lock on logChMu (to avoid closing) - logCh chan interface{} + logCh chan any logChMu sync.RWMutex // If this webhook is being re-configured we will @@ -131,7 +131,7 @@ type Target struct { // store to persist and replay the logs to the target // to avoid missing events when the target is down. 
- store store.Store[interface{}] + store store.Store[any] storeCtxCancel context.CancelFunc initQueueOnce once.Init @@ -199,7 +199,7 @@ func (h *Target) initDiskStore(ctx context.Context) (err error) { h.lastStarted = time.Now() go h.startQueueProcessor(ctx, true) - queueStore := store.NewQueueStore[interface{}]( + queueStore := store.NewQueueStore[any]( filepath.Join(h.config.QueueDir, h.Name()), uint64(h.config.QueueSize), httpLoggerExtension, @@ -289,7 +289,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) { h.wg.Add(1) defer h.wg.Done() - entries := make([]interface{}, 0) + entries := make([]any, 0) name := h.Name() defer func() { @@ -455,7 +455,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) { } } - entries = make([]interface{}, 0) + entries = make([]any, 0) count = 0 if !isDirQueue { buf.Reset() @@ -481,7 +481,7 @@ func CreateOrAdjustGlobalBuffer(currentTgt *Target, newTgt *Target) { currentBuff, ok := logChBuffers[name] if !ok { - logChBuffers[name] = make(chan interface{}, requiredCap) + logChBuffers[name] = make(chan any, requiredCap) currentCap = requiredCap } else { currentCap = cap(currentBuff) @@ -489,7 +489,7 @@ func CreateOrAdjustGlobalBuffer(currentTgt *Target, newTgt *Target) { } if requiredCap > currentCap { - logChBuffers[name] = make(chan interface{}, requiredCap) + logChBuffers[name] = make(chan any, requiredCap) if len(currentBuff) > 0 { drain: @@ -519,7 +519,7 @@ func New(config Config) (*Target, error) { } h := &Target{ - logCh: make(chan interface{}, config.QueueSize), + logCh: make(chan any, config.QueueSize), config: config, batchSize: config.BatchSize, maxWorkers: int64(maxWorkers), @@ -579,7 +579,7 @@ func (h *Target) SendFromStore(key store.Key) (err error) { // Send the log message 'entry' to the http target. // Messages are queued in the disk if the store is enabled // If Cancel has been called the message is ignored. -func (h *Target) Send(ctx context.Context, entry interface{}) error { +func (h *Target) Send(ctx context.Context, entry any) error { if h.status.Load() == statusClosed { if h.migrateTarget != nil { return h.migrateTarget.Send(ctx, entry) diff --git a/internal/logger/target/kafka/kafka.go b/internal/logger/target/kafka/kafka.go index 2fb488914..0baed1363 100644 --- a/internal/logger/target/kafka/kafka.go +++ b/internal/logger/target/kafka/kafka.go @@ -75,7 +75,7 @@ type Config struct { QueueDir string `json:"queueDir"` // Custom logger - LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"` + LogOnce func(ctx context.Context, err error, id string, errKind ...any) `json:"-"` } // Target - Kafka target. @@ -90,12 +90,12 @@ type Target struct { // Channel of log entries. // Reading logCh must hold read lock on logChMu (to avoid read race) // Sending a value on logCh must hold read lock on logChMu (to avoid closing) - logCh chan interface{} + logCh chan any logChMu sync.RWMutex // store to persist and replay the logs to the target // to avoid missing events when the target is down. 
- store store.Store[interface{}] + store store.Store[any] storeCtxCancel context.CancelFunc initKafkaOnce once.Init @@ -170,7 +170,7 @@ func (h *Target) Init(ctx context.Context) error { func (h *Target) initQueueStore(ctx context.Context) (err error) { queueDir := filepath.Join(h.kconfig.QueueDir, h.Name()) - queueStore := store.NewQueueStore[interface{}](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension) + queueStore := store.NewQueueStore[any](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension) if err = queueStore.Open(); err != nil { return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err) } @@ -202,7 +202,7 @@ func (h *Target) startKafkaLogger() { } } -func (h *Target) logEntry(entry interface{}) { +func (h *Target) logEntry(entry any) { atomic.AddInt64(&h.totalMessages, 1) if err := h.send(entry); err != nil { atomic.AddInt64(&h.failedMessages, 1) @@ -210,7 +210,7 @@ func (h *Target) logEntry(entry interface{}) { } } -func (h *Target) send(entry interface{}) error { +func (h *Target) send(entry any) error { if err := h.initKafkaOnce.Do(h.init); err != nil { return err } @@ -311,7 +311,7 @@ func (h *Target) IsOnline(_ context.Context) bool { } // Send log message 'e' to kafka target. -func (h *Target) Send(ctx context.Context, entry interface{}) error { +func (h *Target) Send(ctx context.Context, entry any) error { if h.store != nil { // save the entry to the queue store which will be replayed to the target. _, err := h.store.Put(entry) @@ -391,7 +391,7 @@ func (h *Target) Cancel() { // sends log over http to the specified endpoint func New(config Config) *Target { target := &Target{ - logCh: make(chan interface{}, config.QueueSize), + logCh: make(chan any, config.QueueSize), kconfig: config, status: statusOffline, } diff --git a/internal/logger/target/testlogger/testlogger.go b/internal/logger/target/testlogger/testlogger.go index d2b4149a0..68f0f82dd 100644 --- a/internal/logger/target/testlogger/testlogger.go +++ b/internal/logger/target/testlogger/testlogger.go @@ -113,7 +113,7 @@ func (t *testLogger) Cancel() { t.current.Store(nil) } -func (t *testLogger) Send(ctx context.Context, entry interface{}) error { +func (t *testLogger) Send(ctx context.Context, entry any) error { tb := t.current.Load() var logf func(format string, args ...any) if tb != nil { diff --git a/internal/logger/targets.go b/internal/logger/targets.go index b4df2e5c8..16df63f51 100644 --- a/internal/logger/targets.go +++ b/internal/logger/targets.go @@ -39,7 +39,7 @@ type Target interface { Init(ctx context.Context) error IsOnline(ctx context.Context) bool Cancel() - Send(ctx context.Context, entry interface{}) error + Send(ctx context.Context, entry any) error Type() types.TargetType } diff --git a/internal/logger/utils.go b/internal/logger/utils.go index b0fdfdd22..0a10949cd 100644 --- a/internal/logger/utils.go +++ b/internal/logger/utils.go @@ -31,7 +31,7 @@ import ( var ansiRE = regexp.MustCompile("(\x1b[^m]*m)") // Print ANSI Control escape -func ansiEscape(format string, args ...interface{}) { +func ansiEscape(format string, args ...any) { Esc := "\x1b" fmt.Printf("%s%s", Esc, fmt.Sprintf(format, args...)) } diff --git a/internal/lsync/lrwmutex_test.go b/internal/lsync/lrwmutex_test.go index 46c6c8bdd..96dbe1151 100644 --- a/internal/lsync/lrwmutex_test.go +++ b/internal/lsync/lrwmutex_test.go @@ -152,18 +152,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) { clocked := make(chan bool) cunlock := make(chan bool) cdone := make(chan bool) - 
for i := 0; i < numReaders; i++ { + for range numReaders { go parallelReader(context.Background(), m, clocked, cunlock, cdone) } // Wait for all parallel RLock()s to succeed. - for i := 0; i < numReaders; i++ { + for range numReaders { <-clocked } - for i := 0; i < numReaders; i++ { + for range numReaders { cunlock <- true } // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { + for range numReaders { <-cdone } } @@ -178,13 +178,13 @@ func TestParallelReaders(t *testing.T) { // Borrowed from rwmutex_test.go func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetRLock(context.Background(), "", "", time.Second) { n := atomic.AddInt32(activity, 1) if n < 1 || n >= 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -1) rwm.RUnlock() @@ -195,13 +195,13 @@ func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) // Borrowed from rwmutex_test.go func writer(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetLock(context.Background(), "", "", time.Second) { n := atomic.AddInt32(activity, 10000) if n != 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -10000) rwm.Unlock() @@ -260,7 +260,7 @@ func TestDRLocker(t *testing.T) { rl = wl.DRLocker() n := 10 go func() { - for i := 0; i < n; i++ { + for range n { rl.Lock() rl.Lock() rlocked <- true rl.Unlock() wlocked <- true } }() - for i := 0; i < n; i++ { + for range n { <-rlocked rl.Unlock() select { diff --git a/internal/rest/client.go b/internal/rest/client.go index 4cbfb765c..aba96aaa5 100644 --- a/internal/rest/client.go +++ b/internal/rest/client.go @@ -458,10 +458,7 @@ func exponentialBackoffWait(r *rand.Rand, unit, maxSleep time.Duration) func(uin attempt = 16 } // sleep = random_between(unit, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<attempt) - if sleep > maxSleep { - sleep = maxSleep - } + sleep := min(unit*time.Duration(1<<attempt), maxSleep) [...] diff --git a/internal/ringbuffer/ring_buffer.go b/internal/ringbuffer/ring_buffer.go [...] if r.w > r.r { - n = r.w - r.r - if n > len(p) { - n = len(p) - } + n = min(r.w-r.r, len(p)) copy(p, r.buf[r.r:r.r+n]) r.r = (r.r + n) % r.size return } - n = r.size - r.r + r.w - if n > len(p) { - n = len(p) - } + n = min(r.size-r.r+r.w, len(p)) if r.r+n <= r.size { copy(p, r.buf[r.r:r.r+n]) diff --git a/internal/ringbuffer/ring_buffer_benchmark_test.go b/internal/ringbuffer/ring_buffer_benchmark_test.go index 65f34571b..5de127add 100644 @@ -11,8 +11,7 @@ func BenchmarkRingBuffer_Sync(b *testing.B) { data := []byte(strings.Repeat("a", 512)) buf := make([]byte, 512) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { rb.Write(data) rb.Read(buf) } @@ -30,8 +29,7 @@ func BenchmarkRingBuffer_AsyncRead(b *testing.B) { } }() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { rb.Write(data) } } @@ -50,8 +48,7 @@ func BenchmarkRingBuffer_AsyncReadBlocking(b *testing.B) { } }() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { rb.Write(data) } } @@ -67,8 +64,7 @@ func BenchmarkRingBuffer_AsyncWrite(b *testing.B) { } }() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { rb.Read(buf) } } @@ -87,8 +83,7 @@ func BenchmarkRingBuffer_AsyncWriteBlocking(b
*testing.B) { } }() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { rb.Read(buf) } } @@ -104,8 +99,7 @@ func BenchmarkIoPipeReader(b *testing.B) { } }() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { pr.Read(buf) } } diff --git a/internal/ringbuffer/ring_buffer_test.go b/internal/ringbuffer/ring_buffer_test.go index a7f7c219f..72f1c5d1a 100644 --- a/internal/ringbuffer/ring_buffer_test.go +++ b/internal/ringbuffer/ring_buffer_test.go @@ -429,7 +429,7 @@ func TestRingBuffer_Blocking(t *testing.T) { read = io.MultiWriter(read, &readBuf) wrote = io.MultiWriter(wrote, &wroteBuf) } - debugln := func(args ...interface{}) { + debugln := func(args ...any) { if debug { fmt.Println(args...) } @@ -488,7 +488,7 @@ func TestRingBuffer_Blocking(t *testing.T) { { buf := make([]byte, 1024) writeRng := rand.New(rand.NewSource(2)) - for i := 0; i < 2500; i++ { + for range 2500 { writeRng.Read(buf) // Write n, err := rb.Write(buf[:writeRng.Intn(len(buf))]) @@ -592,7 +592,7 @@ func TestRingBuffer_BlockingBig(t *testing.T) { read = io.MultiWriter(read, &readBuf) wrote = io.MultiWriter(wrote, &wroteBuf) } - debugln := func(args ...interface{}) { + debugln := func(args ...any) { if debug { fmt.Println(args...) } @@ -651,7 +651,7 @@ func TestRingBuffer_BlockingBig(t *testing.T) { { writeRng := rand.New(rand.NewSource(2)) buf := make([]byte, 64<<10) - for i := 0; i < 500; i++ { + for range 500 { writeRng.Read(buf) // Write n, err := rb.Write(buf[:writeRng.Intn(len(buf))]) diff --git a/internal/s3select/csv/reader_contrib_test.go b/internal/s3select/csv/reader_contrib_test.go index 16c412e71..f2262f5e8 100644 --- a/internal/s3select/csv/reader_contrib_test.go +++ b/internal/s3select/csv/reader_contrib_test.go @@ -84,7 +84,7 @@ func TestRead(t *testing.T) { } type tester interface { - Fatal(...interface{}) + Fatal(...any) } func openTestFile(t tester, file string) []byte { @@ -508,10 +508,10 @@ func BenchmarkReaderBasic(b *testing.B) { } defer r.Close() b.ReportAllocs() - b.ResetTimer() + b.SetBytes(int64(len(f))) var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) @@ -537,7 +537,7 @@ func BenchmarkReaderHuge(b *testing.B) { AllowQuotedRecordDelimiter: false, unmarshaled: true, } - for n := 0; n < 11; n++ { + for n := range 11 { f := openTestFile(b, "nyc-taxi-data-100k.csv") want := 309 for i := 0; i < n; i++ { @@ -549,7 +549,7 @@ func BenchmarkReaderHuge(b *testing.B) { b.SetBytes(int64(len(f))) b.ResetTimer() var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) @@ -590,10 +590,10 @@ func BenchmarkReaderReplace(b *testing.B) { } defer r.Close() b.ReportAllocs() - b.ResetTimer() + b.SetBytes(int64(len(f))) var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) @@ -627,10 +627,10 @@ func BenchmarkReaderReplaceTwo(b *testing.B) { } defer r.Close() b.ReportAllocs() - b.ResetTimer() + b.SetBytes(int64(len(f))) var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) diff --git a/internal/s3select/csv/record.go b/internal/s3select/csv/record.go index 
159c63f4f..57d4c9638 100644 --- a/internal/s3select/csv/record.go +++ b/internal/s3select/csv/record.go @@ -46,8 +46,8 @@ func (r *Record) Get(name string) (*sql.Value, error) { index, found := r.nameIndexMap[name] if !found { // Check if index. - if strings.HasPrefix(name, "_") { - idx, err := strconv.Atoi(strings.TrimPrefix(name, "_")) + if after, ok := strings.CutPrefix(name, "_"); ok { + idx, err := strconv.Atoi(after) if err != nil { return nil, fmt.Errorf("column %v not found", name) } @@ -133,12 +133,12 @@ func (r *Record) WriteJSON(writer io.Writer) error { } // Raw - returns the underlying data with format info. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { +func (r *Record) Raw() (sql.SelectObjectFormat, any) { return sql.SelectFmtCSV, r } // Replace - is not supported for CSV -func (r *Record) Replace(_ interface{}) error { +func (r *Record) Replace(_ any) error { return errors.New("Replace is not supported for CSV") } diff --git a/internal/s3select/json/preader_test.go b/internal/s3select/json/preader_test.go index fcdb5afc5..200befe86 100644 --- a/internal/s3select/json/preader_test.go +++ b/internal/s3select/json/preader_test.go @@ -88,7 +88,7 @@ func BenchmarkPReader(b *testing.B) { b.ReportAllocs() b.ResetTimer() var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r := NewPReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{}) for { record, err = r.Read(record) diff --git a/internal/s3select/json/reader_test.go b/internal/s3select/json/reader_test.go index 3a98fc500..6840cd18e 100644 --- a/internal/s3select/json/reader_test.go +++ b/internal/s3select/json/reader_test.go @@ -88,7 +88,7 @@ func BenchmarkReader(b *testing.B) { b.ReportAllocs() b.ResetTimer() var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r := NewReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{}) for { record, err = r.Read(record) diff --git a/internal/s3select/json/record.go b/internal/s3select/json/record.go index 65462e863..80b7019f9 100644 --- a/internal/s3select/json/record.go +++ b/internal/s3select/json/record.go @@ -76,7 +76,7 @@ func (r *Record) Clone(dst sql.Record) sql.Record { // Set - sets the value for a column name. func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) { - var v interface{} + var v any if b, ok := value.ToBool(); ok { v = b } else if f, ok := value.ToFloat(); ok { @@ -126,7 +126,7 @@ func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { columnValue = "" case RawJSON: columnValue = string([]byte(val)) - case []interface{}: + case []any: b, err := json.Marshal(val) if err != nil { return err @@ -151,7 +151,7 @@ func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { } // Raw - returns the underlying representation. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { +func (r *Record) Raw() (sql.SelectObjectFormat, any) { return r.SelectFormat, r.KVS } @@ -161,7 +161,7 @@ func (r *Record) WriteJSON(writer io.Writer) error { } // Replace the underlying buffer of json data. 
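`strings.CutPrefix` (Go 1.20), used in the CSV `Get` path above, fuses the `HasPrefix` test and the `TrimPrefix` call into one operation that returns the remainder plus a found flag. A standalone sketch:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        name := "_12"

        // Replaces: if strings.HasPrefix(name, "_") { idx, err := strconv.Atoi(strings.TrimPrefix(name, "_")); ... }
        if after, ok := strings.CutPrefix(name, "_"); ok {
            idx, err := strconv.Atoi(after)
            fmt.Println(idx, err) // 12 <nil>
        }
    }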
-func (r *Record) Replace(k interface{}) error { +func (r *Record) Replace(k any) error { v, ok := k.(jstream.KVS) if !ok { return fmt.Errorf("cannot replace internal data in json record with type %T", k) diff --git a/internal/s3select/jstream/decoder.go b/internal/s3select/jstream/decoder.go index c2bfd3f09..4b21fcf4e 100644 --- a/internal/s3select/jstream/decoder.go +++ b/internal/s3select/jstream/decoder.go @@ -29,14 +29,14 @@ type MetaValue struct { Offset int Length int Depth int - Value interface{} + Value any ValueType ValueType } // KV contains a key and value pair parsed from a decoded object type KV struct { - Key string `json:"key"` - Value interface{} `json:"value"` + Key string `json:"key"` + Value any `json:"value"` } // KVS - represents key values in an JSON object @@ -160,7 +160,7 @@ func (d *Decoder) decode() { } } -func (d *Decoder) emitAny() (interface{}, error) { +func (d *Decoder) emitAny() (any, error) { if d.pos >= atomic.LoadInt64(&d.end) { return nil, d.mkError(ErrUnexpectedEOF) } @@ -189,7 +189,7 @@ func (d *Decoder) willEmit() bool { // any used to decode any valid JSON value, and returns an // interface{} that holds the actual data -func (d *Decoder) any() (interface{}, ValueType, error) { +func (d *Decoder) any() (any, ValueType, error) { c := d.cur() switch c { @@ -239,7 +239,7 @@ func (d *Decoder) any() (interface{}, ValueType, error) { i, err := d.array() return i, Array, err case '{': - var i interface{} + var i any var err error if d.objectAsKVS { i, err = d.objectOrdered() @@ -426,7 +426,7 @@ func (d *Decoder) number() (float64, error) { } // array accept valid JSON array value -func (d *Decoder) array() ([]interface{}, error) { +func (d *Decoder) array() ([]any, error) { d.depth++ if d.maxDepth > 0 && d.depth > d.maxDepth { return nil, ErrMaxDepth @@ -434,9 +434,9 @@ func (d *Decoder) array() ([]interface{}, error) { var ( c byte - v interface{} + v any err error - array = make([]interface{}, 0) + array = make([]any, 0) ) // look ahead for ] - if the array is empty. 
@@ -470,7 +470,7 @@ out: } // object accept valid JSON array value -func (d *Decoder) object() (map[string]interface{}, error) { +func (d *Decoder) object() (map[string]any, error) { d.depth++ if d.maxDepth > 0 && d.depth > d.maxDepth { return nil, ErrMaxDepth @@ -479,15 +479,15 @@ func (d *Decoder) object() (map[string]interface{}, error) { var ( c byte k string - v interface{} + v any t ValueType err error - obj map[string]interface{} + obj map[string]any ) // skip allocating map if it will not be emitted if d.depth > d.emitDepth { - obj = make(map[string]interface{}) + obj = make(map[string]any) } // if the object has no keys @@ -567,7 +567,7 @@ func (d *Decoder) objectOrdered() (KVS, error) { var ( c byte k string - v interface{} + v any t ValueType err error obj KVS diff --git a/internal/s3select/jstream/decoder_test.go b/internal/s3select/jstream/decoder_test.go index 8c876fc9d..8f6fd7aa1 100644 --- a/internal/s3select/jstream/decoder_test.go +++ b/internal/s3select/jstream/decoder_test.go @@ -74,7 +74,7 @@ func TestDecoderFlat(t *testing.T) { 1, 2.5 ]` expected = []struct { - Value interface{} + Value any ValueType ValueType }{ { diff --git a/internal/s3select/jstream/scanner_test.go b/internal/s3select/jstream/scanner_test.go index de0be3b2c..374a75dfe 100644 --- a/internal/s3select/jstream/scanner_test.go +++ b/internal/s3select/jstream/scanner_test.go @@ -85,17 +85,17 @@ func TestScannerFailure(t *testing.T) { func BenchmarkBufioScanner(b *testing.B) { b.Run("small", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkBufioScanner(smallInput) } }) b.Run("medium", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkBufioScanner(mediumInput) } }) b.Run("large", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkBufioScanner(largeInput) } }) @@ -111,17 +111,17 @@ func benchmarkBufioScanner(b []byte) { func BenchmarkBufioReader(b *testing.B) { b.Run("small", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkBufioReader(smallInput) } }) b.Run("medium", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkBufioReader(mediumInput) } }) b.Run("large", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkBufioReader(largeInput) } }) @@ -145,17 +145,17 @@ loop: func BenchmarkScanner(b *testing.B) { b.Run("small", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkScanner(smallInput) } }) b.Run("medium", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkScanner(mediumInput) } }) b.Run("large", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { benchmarkScanner(largeInput) } }) diff --git a/internal/s3select/parquet/reader.go b/internal/s3select/parquet/reader.go index ae050e5ee..29357ddc6 100644 --- a/internal/s3select/parquet/reader.go +++ b/internal/s3select/parquet/reader.go @@ -56,7 +56,7 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) { kvs := jstream.KVS{} for _, col := range pr.r.Columns() { - var value interface{} + var value any if v, ok := nextRow[col.FlatName()]; ok { value, err = convertFromAnnotation(col.Element(), v) if err != nil { @@ -80,12 +80,12 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) { // annotations. LogicalType annotations if present override the deprecated // ConvertedType annotations. 
Ref: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md -func convertFromAnnotation(se *parquettypes.SchemaElement, v interface{}) (interface{}, error) { +func convertFromAnnotation(se *parquettypes.SchemaElement, v any) (any, error) { if se == nil { return v, nil } - var value interface{} + var value any switch val := v.(type) { case []byte: // TODO: only strings are supported in s3select output (not diff --git a/internal/s3select/select_benchmark_test.go b/internal/s3select/select_benchmark_test.go index d21c8325a..43e19e268 100644 --- a/internal/s3select/select_benchmark_test.go +++ b/internal/s3select/select_benchmark_test.go @@ -46,7 +46,7 @@ func genSampleCSVData(count int) []byte { csvWriter := csv.NewWriter(buf) csvWriter.Write([]string{"id", "name", "age", "city"}) - for i := 0; i < count; i++ { + for i := range count { csvWriter.Write([]string{ strconv.Itoa(i), newRandString(10), diff --git a/internal/s3select/select_test.go b/internal/s3select/select_test.go index 1f0ef4d96..7a623900a 100644 --- a/internal/s3select/select_test.go +++ b/internal/s3select/select_test.go @@ -630,7 +630,7 @@ func TestJSONQueries(t *testing.T) { if len(testReq) == 0 { var escaped bytes.Buffer xml.EscapeText(&escaped, []byte(testCase.query)) - testReq = []byte(fmt.Sprintf(defRequest, escaped.String())) + testReq = fmt.Appendf(nil, defRequest, escaped.String()) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -676,7 +676,7 @@ func TestJSONQueries(t *testing.T) { if len(testReq) == 0 { var escaped bytes.Buffer xml.EscapeText(&escaped, []byte(testCase.query)) - testReq = []byte(fmt.Sprintf(defRequest, escaped.String())) + testReq = fmt.Appendf(nil, defRequest, escaped.String()) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -761,7 +761,7 @@ func TestCSVQueries(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { testReq := testCase.requestXML if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) + testReq = fmt.Appendf(nil, defRequest, testCase.query) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -944,7 +944,7 @@ func TestCSVQueries2(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { testReq := testCase.requestXML if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) + testReq = fmt.Appendf(nil, defRequest, testCase.query) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -1088,7 +1088,7 @@ true`, t.Run(testCase.name, func(t *testing.T) { testReq := testCase.requestXML if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) + testReq = fmt.Appendf(nil, defRequest, testCase.query) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { diff --git a/internal/s3select/simdj/reader_amd64_test.go b/internal/s3select/simdj/reader_amd64_test.go index f9a2bca49..8028496af 100644 --- a/internal/s3select/simdj/reader_amd64_test.go +++ b/internal/s3select/simdj/reader_amd64_test.go @@ -31,7 +31,7 @@ import ( ) type tester interface { - Fatal(args ...interface{}) + Fatal(args ...any) } func loadCompressed(t tester, file string) (js []byte) { diff --git a/internal/s3select/simdj/record.go b/internal/s3select/simdj/record.go index 9f66069d6..6adb404c6 100644 --- a/internal/s3select/simdj/record.go +++ b/internal/s3select/simdj/record.go @@ -185,7 +185,7 @@ allElems: } // Raw - returns the underlying representation. 
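The select test changes above replace `[]byte(fmt.Sprintf(...))` with `fmt.Appendf(nil, ...)`, available since Go 1.19. The output is byte-for-byte identical, but `Appendf` formats directly into a byte slice rather than building an intermediate string and copying it. A standalone illustration (the ARN format string is an invented example, not taken from these tests):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	bucket := "testbucket"

	// Before: format into a string, then copy that string into a new slice.
	before := []byte(fmt.Sprintf("arn:aws:s3:::%s/*", bucket))

	// After (Go 1.19+): append the formatted output straight onto a byte
	// slice; passing nil starts from an empty one and skips the copy.
	after := fmt.Appendf(nil, "arn:aws:s3:::%s/*", bucket)

	fmt.Println(bytes.Equal(before, after)) // true
}
```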
-func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { +func (r *Record) Raw() (sql.SelectObjectFormat, any) { return sql.SelectFmtSIMDJSON, r.object } @@ -211,7 +211,7 @@ func (r *Record) WriteJSON(writer io.Writer) error { } // Replace the underlying buffer of json data. -func (r *Record) Replace(k interface{}) error { +func (r *Record) Replace(k any) error { v, ok := k.(simdjson.Object) if !ok { return fmt.Errorf("cannot replace internal data in simd json record with type %T", k) diff --git a/internal/s3select/sql/evaluate.go b/internal/s3select/sql/evaluate.go index 95dd716da..1cae1e92d 100644 --- a/internal/s3select/sql/evaluate.go +++ b/internal/s3select/sql/evaluate.go @@ -413,7 +413,7 @@ func (e *JSONPath) evalNode(r Record, tableAlias string) (*Value, error) { } // jsonToValue will convert the json value to an internal value. -func jsonToValue(result interface{}) (*Value, error) { +func jsonToValue(result any) (*Value, error) { switch rval := result.(type) { case string: return FromString(rval), nil @@ -434,7 +434,7 @@ func jsonToValue(result interface{}) (*Value, error) { return nil, err } return FromBytes(bs), nil - case []interface{}: + case []any: dst := make([]Value, len(rval)) for i := range rval { v, err := jsonToValue(rval[i]) diff --git a/internal/s3select/sql/jsonpath.go b/internal/s3select/sql/jsonpath.go index 9ac995e96..3b18f47a4 100644 --- a/internal/s3select/sql/jsonpath.go +++ b/internal/s3select/sql/jsonpath.go @@ -34,7 +34,7 @@ var ( // jsonpathEval evaluates a JSON path and returns the value at the path. // If the value should be considered flat (from wildcards) any array returned should be considered individual values. -func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool, err error) { +func jsonpathEval(p []*JSONPathElement, v any) (r any, flat bool, err error) { // fmt.Printf("JPATHexpr: %v jsonobj: %v\n\n", p, v) if len(p) == 0 || v == nil { return v, false, nil @@ -71,7 +71,7 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool case p[0].Index != nil: idx := *p[0].Index - arr, ok := v.([]interface{}) + arr, ok := v.([]any) if !ok { return nil, false, errIndexLookup } @@ -100,14 +100,14 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool } case p[0].ArrayWildcard: - arr, ok := v.([]interface{}) + arr, ok := v.([]any) if !ok { return nil, false, errWildcardArrayLookup } // Lookup remainder of path in each array element and // make result array. - var result []interface{} + var result []any for _, a := range arr { rval, flatten, err := jsonpathEval(p[1:], a) if err != nil { @@ -116,7 +116,7 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool if flatten { // Flatten if array. - if arr, ok := rval.([]interface{}); ok { + if arr, ok := rval.([]any); ok { result = append(result, arr...) 
continue } diff --git a/internal/s3select/sql/jsonpath_test.go b/internal/s3select/sql/jsonpath_test.go index 2825e9a9e..bbe9ea685 100644 --- a/internal/s3select/sql/jsonpath_test.go +++ b/internal/s3select/sql/jsonpath_test.go @@ -30,9 +30,9 @@ import ( "github.com/minio/minio/internal/s3select/jstream" ) -func getJSONStructs(b []byte) ([]interface{}, error) { +func getJSONStructs(b []byte) ([]any, error) { dec := jstream.NewDecoder(bytes.NewBuffer(b), 0).ObjectAsKVS().MaxDepth(100) - var result []interface{} + var result []any for parsedVal := range dec.Stream() { result = append(result, parsedVal.Value) } @@ -60,13 +60,13 @@ func TestJsonpathEval(t *testing.T) { ) cases := []struct { str string - res []interface{} + res []any }{ - {"s.title", []interface{}{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}}, - {"s.authorInfo.yearRange", []interface{}{[]interface{}{1890.0, 1976.0}, []interface{}{1920.0, 1992.0}, []interface{}{1881.0, 1975.0}}}, - {"s.authorInfo.name", []interface{}{"Agatha Christie", "Isaac Asimov", "P. G. Wodehouse"}}, - {"s.authorInfo.yearRange[0]", []interface{}{1890.0, 1920.0, 1881.0}}, - {"s.publicationHistory[0].pages", []interface{}{256.0, 336.0, Missing{}}}, + {"s.title", []any{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}}, + {"s.authorInfo.yearRange", []any{[]any{1890.0, 1976.0}, []any{1920.0, 1992.0}, []any{1881.0, 1975.0}}}, + {"s.authorInfo.name", []any{"Agatha Christie", "Isaac Asimov", "P. G. Wodehouse"}}, + {"s.authorInfo.yearRange[0]", []any{1890.0, 1920.0, 1881.0}}, + {"s.publicationHistory[0].pages", []any{256.0, 336.0, Missing{}}}, } for i, tc := range cases { t.Run(tc.str, func(t *testing.T) { diff --git a/internal/s3select/sql/record.go b/internal/s3select/sql/record.go index 925b6e95e..2bc25df46 100644 --- a/internal/s3select/sql/record.go +++ b/internal/s3select/sql/record.go @@ -63,16 +63,16 @@ type Record interface { Reset() // Returns underlying representation - Raw() (SelectObjectFormat, interface{}) + Raw() (SelectObjectFormat, any) // Replaces the underlying data - Replace(k interface{}) error + Replace(k any) error } // IterToValue converts a simdjson Iter to its underlying value. // Objects are returned as simdjson.Object // Arrays are returned as []interface{} with parsed values. 
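Beyond the alias rewrite, the jsonpath and record hunks show the shape this code depends on: dynamically decoded JSON arrives as `[]any` for arrays and as `map[string]any` (or `jstream.KVS`) for objects, and the evaluator type-asserts its way down, while `Replace` asserts the argument back to the one concrete type each record supports. A small hypothetical walker in the same style, using encoding/json rather than jstream purely for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// leaves recursively collects scalar values, asserting the two shapes
// a generic JSON decoder produces: []any for arrays and map[string]any
// for objects. The jsonpath evaluator above performs the same
// assertions (with jstream.KVS standing in for plain maps).
func leaves(v any) []any {
	switch val := v.(type) {
	case []any:
		var out []any
		for _, elem := range val {
			out = append(out, leaves(elem)...)
		}
		return out
	case map[string]any:
		var out []any
		for _, elem := range val {
			out = append(out, leaves(elem)...)
		}
		return out
	default: // string, float64, bool, nil
		return []any{val}
	}
}

func main() {
	var doc any
	_ = json.Unmarshal([]byte(`{"yearRange":[1890,1976],"name":"Agatha Christie"}`), &doc)
	fmt.Println(len(leaves(doc))) // 3
}
```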
-func IterToValue(iter simdjson.Iter) (interface{}, error) { +func IterToValue(iter simdjson.Iter) (any, error) { switch iter.Type() { case simdjson.TypeString: v, err := iter.String() @@ -118,7 +118,7 @@ func IterToValue(iter simdjson.Iter) (interface{}, error) { return nil, err } iter := arr.Iter() - var dst []interface{} + var dst []any var next simdjson.Iter for { typ, err := iter.AdvanceIter(&next) diff --git a/internal/s3select/sql/statement.go b/internal/s3select/sql/statement.go index 14068b6d7..d721c2b3e 100644 --- a/internal/s3select/sql/statement.go +++ b/internal/s3select/sql/statement.go @@ -174,7 +174,7 @@ func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, erro case jstream.KVS: kvs = v - case []interface{}: + case []any: recs := make([]*Record, len(v)) for i, val := range v { tmpRec := input.Clone(nil) @@ -207,7 +207,7 @@ func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, erro return nil, err } - case []interface{}: + case []any: recs := make([]*Record, len(v)) for i, val := range v { tmpRec := input.Clone(nil) diff --git a/internal/s3select/sql/value.go b/internal/s3select/sql/value.go index 7bd6f780d..24bb84f90 100644 --- a/internal/s3select/sql/value.go +++ b/internal/s3select/sql/value.go @@ -46,7 +46,7 @@ var ( // the type may not be determined yet. In these cases, a byte-slice is // used. type Value struct { - value interface{} + value any } // Missing is used to indicate a non-existing value. diff --git a/internal/s3select/sql/value_test.go b/internal/s3select/sql/value_test.go index 4d77ad2ab..416d7409e 100644 --- a/internal/s3select/sql/value_test.go +++ b/internal/s3select/sql/value_test.go @@ -217,7 +217,7 @@ func TestValue_CSVString(t *testing.T) { func TestValue_bytesToInt(t *testing.T) { type fields struct { - value interface{} + value any } tests := []struct { name string @@ -367,7 +367,7 @@ func TestValue_bytesToInt(t *testing.T) { func TestValue_bytesToFloat(t *testing.T) { type fields struct { - value interface{} + value any } tests := []struct { name string @@ -569,7 +569,7 @@ func TestValue_bytesToFloat(t *testing.T) { func TestValue_bytesToBool(t *testing.T) { type fields struct { - value interface{} + value any } tests := []struct { name string diff --git a/internal/store/batch_test.go b/internal/store/batch_test.go index faf9d4556..4754e9a93 100644 --- a/internal/store/batch_test.go +++ b/internal/store/batch_test.go @@ -41,7 +41,7 @@ func TestBatchCommit(t *testing.T) { Limit: limit, Store: store, CommitTimeout: 5 * time.Minute, - Log: func(ctx context.Context, err error, id string, errKind ...interface{}) { + Log: func(ctx context.Context, err error, id string, errKind ...any) { t.Log(err) }, }) @@ -106,7 +106,7 @@ func TestBatchCommitOnExit(t *testing.T) { Limit: limit, Store: store, CommitTimeout: 5 * time.Minute, - Log: func(ctx context.Context, err error, id string, errKind ...interface{}) { + Log: func(ctx context.Context, err error, id string, errKind ...any) { t.Log([]any{err, id, errKind}...) 
}, }) @@ -163,7 +163,7 @@ func TestBatchWithConcurrency(t *testing.T) { Limit: limit, Store: store, CommitTimeout: 5 * time.Minute, - Log: func(ctx context.Context, err error, id string, errKind ...interface{}) { + Log: func(ctx context.Context, err error, id string, errKind ...any) { t.Log(err) }, }) diff --git a/internal/store/queuestore_test.go b/internal/store/queuestore_test.go index 8e50d5964..5211520c1 100644 --- a/internal/store/queuestore_test.go +++ b/internal/store/queuestore_test.go @@ -69,7 +69,7 @@ func TestQueueStorePut(t *testing.T) { t.Fatal("Failed to create a queue store ", err) } // Put 100 items. - for i := 0; i < 100; i++ { + for range 100 { if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } @@ -93,7 +93,7 @@ func TestQueueStoreGet(t *testing.T) { t.Fatal("Failed to create a queue store ", err) } // Put 10 items - for i := 0; i < 10; i++ { + for range 10 { if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } @@ -127,7 +127,7 @@ func TestQueueStoreDel(t *testing.T) { t.Fatal("Failed to create a queue store ", err) } // Put 20 items. - for i := 0; i < 20; i++ { + for range 20 { if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } @@ -163,7 +163,7 @@ func TestQueueStoreLimit(t *testing.T) { if err != nil { t.Fatal("Failed to create a queue store ", err) } - for i := 0; i < 5; i++ { + for range 5 { if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } @@ -185,7 +185,7 @@ func TestQueueStoreListN(t *testing.T) { if err != nil { t.Fatal("Failed to create a queue store ", err) } - for i := 0; i < 10; i++ { + for range 10 { if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } @@ -237,7 +237,7 @@ func TestMultiplePutGetRaw(t *testing.T) { } // TestItem{Name: "test-item", Property: "property"} var items []TestItem - for i := 0; i < 10; i++ { + for i := range 10 { items = append(items, TestItem{ Name: fmt.Sprintf("test-item-%d", i), Property: "property", @@ -303,7 +303,7 @@ func TestMultiplePutGets(t *testing.T) { } // TestItem{Name: "test-item", Property: "property"} var items []TestItem - for i := 0; i < 10; i++ { + for i := range 10 { items = append(items, TestItem{ Name: fmt.Sprintf("test-item-%d", i), Property: "property", @@ -359,7 +359,7 @@ func TestMixedPutGets(t *testing.T) { } // TestItem{Name: "test-item", Property: "property"} var items []TestItem - for i := 0; i < 5; i++ { + for i := range 5 { items = append(items, TestItem{ Name: fmt.Sprintf("test-item-%d", i), Property: "property", diff --git a/internal/store/store.go b/internal/store/store.go index fcd76dc60..d5dd80b53 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -32,7 +32,7 @@ const ( retryInterval = 3 * time.Second ) -type logger = func(ctx context.Context, err error, id string, errKind ...interface{}) +type logger = func(ctx context.Context, err error, id string, errKind ...any) // ErrNotConnected - indicates that the target connection is not active. var ErrNotConnected = errors.New("not connected to target server/service")
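Finally, the store hunks adopt Go 1.22's range-over-int, and the logger signature picks up the `...any` variadic spelling. When the loop body needs the counter, `for i := range n` replaces the three-clause form; when it does not, the index is dropped entirely (`for range 10`), as in the queue store tests. A short sketch combining both (the `logger` type mirrors the alias in store.go, but this snippet is illustrative, not the package's code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// logger mirrors the alias in store.go: a variadic ...any tail is the
// modern spelling of ...interface{} and accepts any trailing values.
type logger = func(ctx context.Context, err error, id string, errKind ...any)

func main() {
	var log logger = func(ctx context.Context, err error, id string, errKind ...any) {
		fmt.Println(id, err, errKind)
	}

	// Go 1.22+: range over an int when the loop only needs a counter.
	for i := range 3 {
		log(context.Background(), errors.New("retry"), fmt.Sprintf("item-%d", i), "connectivity", i)
	}
}
```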