From 9f24ca5d66dfe96129bf100dd83b7edd8cf6b93d Mon Sep 17 00:00:00 2001
From: jiuker <2818723467@qq.com>
Date: Tue, 27 May 2025 23:18:26 +0800
Subject: [PATCH 1/7] fix: empty fileName causes a nil Reader in PostPolicyBucketHandler (#21323)

---
 cmd/bucket-handlers.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go
index b67b7c36a..21231d397 100644
--- a/cmd/bucket-handlers.go
+++ b/cmd/bucket-handlers.go
@@ -1089,6 +1089,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		break
 	}
 
+	// Check that a file or text part was actually provided.
+	if reader == nil {
+		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
+		writeErrorResponse(ctx, w, apiErr, r.URL)
+		return
+	}
+
 	if keyName, ok := formValues["Key"]; !ok {
 		apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
 		apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))

From ea77bcfc98c0688136ea15a020f588cbcee437a9 Mon Sep 17 00:00:00 2001
From: jiuker <2818723467@qq.com>
Date: Tue, 27 May 2025 23:18:36 +0800
Subject: [PATCH 2/7] fix: panic for TestListObjectsWithILM (#21322)

---
 cmd/object-api-listobjects_test.go | 11 ++++++++++-
 cmd/server-main.go                 |  4 +++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go
index b6f39d7ab..00dd61328 100644
--- a/cmd/object-api-listobjects_test.go
+++ b/cmd/object-api-listobjects_test.go
@@ -1938,6 +1938,10 @@ func TestListObjectsWithILM(t *testing.T) {
 }
 
 func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
+	// Prepare lifecycle expiration workers
+	es := newExpiryState(t1.Context(), obj, 0)
+	globalExpiryState = es
+
 	t, _ := t1.(*testing.T)
 
 	objContent := "test-content"
@@ -1977,7 +1981,12 @@ func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHand
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
 
-	globalBucketMetadataSys.Set(upload.bucket, BucketMetadata{lifecycleConfig: lifecycleConfig})
+	metadata, err := globalBucketMetadataSys.Get(upload.bucket)
+	if err != nil {
+		t.Fatal(err)
+	}
+	metadata.lifecycleConfig = lifecycleConfig
+	globalBucketMetadataSys.Set(upload.bucket, metadata)
 	defer globalBucketMetadataSys.Remove(upload.bucket)
 
 	// Upload objects which modtime as one week ago, supposed to be expired by ILM
diff --git a/cmd/server-main.go b/cmd/server-main.go
index 53df3081e..ade86cca1 100644
--- a/cmd/server-main.go
+++ b/cmd/server-main.go
@@ -452,7 +452,9 @@ func initAllSubsystems(ctx context.Context) {
 	globalNotificationSys = NewNotificationSys(globalEndpoints)
 
 	// Create new notification system
-	globalEventNotifier = NewEventNotifier(GlobalContext)
+	if globalEventNotifier == nil {
+		globalEventNotifier = NewEventNotifier(GlobalContext)
+	}
 
 	// Create new bucket metadata system.
if globalBucketMetadataSys == nil { From 0a36d41dcd86fa7cfe8a2077e9f6f7bbb6a88d02 Mon Sep 17 00:00:00 2001 From: ILIYA <68940374+12ya@users.noreply.github.com> Date: Wed, 28 May 2025 00:19:03 +0900 Subject: [PATCH 3/7] modernizes for loop in cmd/, internal/ (#21309) --- cmd/api-utils.go | 6 +++--- cmd/background-heal-ops.go | 2 +- cmd/batch-handlers.go | 2 +- cmd/consolelogger.go | 2 +- cmd/erasure-multipart.go | 2 +- cmd/erasure-object.go | 8 ++++---- cmd/erasure-server-pool-rebalance.go | 2 +- cmd/erasure-sets.go | 12 ++++++------ cmd/fmt-gen.go | 2 +- cmd/format-erasure.go | 6 +++--- cmd/mrf.go | 2 +- cmd/notification.go | 2 +- cmd/object-api-utils.go | 6 +++--- cmd/os-readdir_test.go | 6 +++--- cmd/speedtest.go | 2 +- cmd/storage-rest-server.go | 2 +- cmd/utils.go | 4 ++-- cmd/xl-storage-format-v2.go | 2 +- internal/dsync/drwmutex.go | 2 +- internal/event/targetlist.go | 2 +- internal/grid/connection.go | 2 +- internal/grid/debug.go | 2 +- internal/grid/muxclient.go | 2 +- internal/grid/muxserver.go | 2 +- internal/s3select/csv/reader.go | 2 +- internal/s3select/json/preader.go | 2 +- internal/s3select/jstream/decoder.go | 2 +- 27 files changed, 44 insertions(+), 44 deletions(-) diff --git a/cmd/api-utils.go b/cmd/api-utils.go index ab191f067..ee8fa5335 100644 --- a/cmd/api-utils.go +++ b/cmd/api-utils.go @@ -43,7 +43,7 @@ func shouldEscape(c byte) bool { // - Force encoding of '~' func s3URLEncode(s string) string { spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { + for i := range len(s) { c := s[i] if shouldEscape(c) { if c == ' ' { @@ -70,7 +70,7 @@ func s3URLEncode(s string) string { if hexCount == 0 { copy(t, s) - for i := 0; i < len(s); i++ { + for i := range len(s) { if s[i] == ' ' { t[i] = '+' } @@ -79,7 +79,7 @@ func s3URLEncode(s string) string { } j := 0 - for i := 0; i < len(s); i++ { + for i := range len(s) { switch c := s[i]; { case c == ' ': t[j] = '+' diff --git a/cmd/background-heal-ops.go b/cmd/background-heal-ops.go index 8f9d349cc..3eeff5098 100644 --- a/cmd/background-heal-ops.go +++ b/cmd/background-heal-ops.go @@ -102,7 +102,7 @@ func waitForLowHTTPReq() { func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) { bgSeq := newBgHealSequence() // Run the background healer - for i := 0; i < globalBackgroundHealRoutine.workers; i++ { + for range globalBackgroundHealRoutine.workers { go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq) } diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go index 52e57a3e9..1a1d3d598 100644 --- a/cmd/batch-handlers.go +++ b/cmd/batch-handlers.go @@ -248,7 +248,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a pInfo PartInfo ) - for i := 0; i < partsCount; i++ { + for i := range partsCount { gopts := minio.GetObjectOptions{ VersionID: srcObjInfo.VersionID, PartNumber: i + 1, diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go index 49b930766..624e96855 100644 --- a/cmd/consolelogger.go +++ b/cmd/consolelogger.go @@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st sys.RUnlock() // send last n console log messages in order filtered by node if cnt > 0 { - for i := 0; i < last; i++ { + for i := range last { entry := lastN[(cnt+i)%last] if (entry == log.Info{}) { continue diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go index ff4dddf81..8f806d8ce 100644 --- a/cmd/erasure-multipart.go +++ b/cmd/erasure-multipart.go @@ -1481,7 +1481,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx 
context.Context, bucket str } } - for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { // Object info is the same in all disks, so we can pick // the first meta from online disk diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 162f16209..b8f57248b 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -504,7 +504,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st // count the number of offline disks offline := 0 - for i := 0; i < len(errs); i++ { + for i := range len(errs) { var found bool switch { case errors.Is(errs[i], errDiskNotFound): @@ -1221,7 +1221,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * partsMetadata[index].SetInlineData() } - for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { // Object info is the same in all disks, so we can pick // the first meta from online disk @@ -1557,7 +1557,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st return ObjectInfo{}, toObjectErr(err, bucket, object) } - for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { // Object info is the same in all disks, so we can pick // the first meta from online disk @@ -1574,7 +1574,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st if len(versions) == 0 { // Whether a disk was initially or becomes offline // during this upload, send it to the MRF list. - for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { continue } diff --git a/cmd/erasure-server-pool-rebalance.go b/cmd/erasure-server-pool-rebalance.go index 38d68de0e..0c40ffb3e 100644 --- a/cmd/erasure-server-pool-rebalance.go +++ b/cmd/erasure-server-pool-rebalance.go @@ -149,7 +149,7 @@ func (z *erasureServerPools) findIndex(index int) int { if z.rebalMeta == nil { return 0 } - for i := 0; i < len(z.rebalMeta.PoolStats); i++ { + for i := range len(z.rebalMeta.PoolStats) { if i == index { return index } diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index aa09b3a5d..be73b59ab 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -95,7 +95,7 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI { s.erasureDisksMu.RLock() defer s.erasureDisksMu.RUnlock() - for i := 0; i < s.setCount; i++ { + for i := range s.setCount { for j := 0; j < s.setDriveCount; j++ { disk := s.erasureDisks[i][j] if disk == OfflineDisk { @@ -150,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int, if diskID == offlineDiskUUID { return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID) } - for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for i := range len(refFormat.Erasure.Sets) { for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { if refFormat.Erasure.Sets[i][j] == diskID { return i, j, nil @@ -174,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) { return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This) } - for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for i := range len(refFormat.Erasure.Sets) { for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { if refFormat.Erasure.Sets[i][j] == format.Erasure.This { return i, j, nil @@ -377,7 +377,7 @@ func newErasureSets(ctx 
context.Context, endpoints PoolEndpoints, storageDisks [ mutex := newNSLock(globalIsDistErasure) - for i := 0; i < setCount; i++ { + for i := range setCount { s.erasureDisks[i] = make([]StorageAPI, setDriveCount) } @@ -390,7 +390,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ var wg sync.WaitGroup var lk sync.Mutex - for i := 0; i < setCount; i++ { + for i := range setCount { lockerEpSet := set.NewStringSet() for j := 0; j < setDriveCount; j++ { wg.Add(1) @@ -409,7 +409,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ } wg.Wait() - for i := 0; i < setCount; i++ { + for i := range setCount { wg.Add(1) go func(i int) { defer wg.Done() diff --git a/cmd/fmt-gen.go b/cmd/fmt-gen.go index 0032ac3e5..8e8739573 100644 --- a/cmd/fmt-gen.go +++ b/cmd/fmt-gen.go @@ -98,7 +98,7 @@ func fmtGenMain(ctxt *cli.Context) { setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet format := newFormatErasureV3(setCount, setDriveCount) format.ID = deploymentID - for i := 0; i < setCount; i++ { // for each erasure set + for i := range setCount { // for each erasure set for j := 0; j < setDriveCount; j++ { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] diff --git a/cmd/format-erasure.go b/cmd/format-erasure.go index 93b0a4a76..d37833349 100644 --- a/cmd/format-erasure.go +++ b/cmd/format-erasure.go @@ -157,7 +157,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 { format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3 format.Erasure.Sets = make([][]string, numSets) - for i := 0; i < numSets; i++ { + for i := range numSets { format.Erasure.Sets[i] = make([]string, setLen) for j := 0; j < setLen; j++ { format.Erasure.Sets[i][j] = mustGetUUID() @@ -514,7 +514,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e } // Make sure that the diskID is found in the set. 
- for i := 0; i < len(tmpFormat.Erasure.Sets); i++ { + for i := range len(tmpFormat.Erasure.Sets) { for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ { if this == tmpFormat.Erasure.Sets[i][j] { return nil @@ -639,7 +639,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, return nil, err } - for i := 0; i < setCount; i++ { + for i := range setCount { hostCount := make(map[string]int, setDriveCount) for j := 0; j < setDriveCount; j++ { disk := storageDisks[i*setDriveCount+j] diff --git a/cmd/mrf.go b/cmd/mrf.go index af9b0e03c..4d002c27a 100644 --- a/cmd/mrf.go +++ b/cmd/mrf.go @@ -266,7 +266,7 @@ func (m *mrfState) healRoutine(z *erasureServerPools) { if len(u.Versions) > 0 { vers := len(u.Versions) / 16 if vers > 0 { - for i := 0; i < vers; i++ { + for i := range vers { healObject(u.Bucket, u.Object, uuid.UUID(u.Versions[16*i:]).String(), scan) } } diff --git a/cmd/notification.go b/cmd/notification.go index 3ae544a1b..36dc70e07 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -123,7 +123,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a } retryCount := g.retryCount - for i := 0; i < retryCount; i++ { + for i := range retryCount { g.errs[index].Err = nil if err := f(); err != nil { g.errs[index].Err = err diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index 4c29097f0..6275686fa 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -128,7 +128,7 @@ func IsValidBucketName(bucket string) bool { // 'label' in AWS terminology and if the bucket looks // like an IP address. isNotNumber := false - for i := 0; i < len(piece); i++ { + for i := range len(piece) { switch { case (piece[i] >= 'a' && piece[i] <= 'z' || piece[i] == '-'): @@ -254,11 +254,11 @@ func concat(ss ...string) string { } // create & allocate the memory in advance. n := 0 - for i := 0; i < length; i++ { + for i := range length { n += len(ss[i]) } b := make([]byte, 0, n) - for i := 0; i < length; i++ { + for i := range length { b = append(b, ss[i]...) } return unsafe.String(unsafe.SliceData(b), n) diff --git a/cmd/os-readdir_test.go b/cmd/os-readdir_test.go index 9fac2b7d0..5649b5391 100644 --- a/cmd/os-readdir_test.go +++ b/cmd/os-readdir_test.go @@ -77,7 +77,7 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) { func setupTestReadDirFiles(t *testing.T) (testResults []result) { dir := t.TempDir() entries := []string{} - for i := 0; i < 10; i++ { + for i := range 10 { name := fmt.Sprintf("file-%d", i) if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil { // For cleanup, its required to add these entries into test results. @@ -102,7 +102,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) { t.Fatalf("Unable to create prefix directory \"mydir\", %s", err) } entries := []string{"mydir/"} - for i := 0; i < 10; i++ { + for i := range 10 { name := fmt.Sprintf("file-%d", i) if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil { // For cleanup, its required to add these entries into test results. 
@@ -126,7 +126,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) { } dir := t.TempDir() entries := []string{} - for i := 0; i < 10; i++ { + for i := range 10 { name1 := fmt.Sprintf("file-%d", i) name2 := fmt.Sprintf("file-%d", i+10) if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil { diff --git a/cmd/speedtest.go b/cmd/speedtest.go index ddf309964..f91db787d 100644 --- a/cmd/speedtest.go +++ b/cmd/speedtest.go @@ -102,7 +102,7 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT var totalUploadTimes madmin.TimeDurations var totalDownloadTimes madmin.TimeDurations var totalDownloadTTFB madmin.TimeDurations - for i := 0; i < len(throughputHighestResults); i++ { + for i := range len(throughputHighestResults) { errStr := "" if throughputHighestResults[i].Error != "" { errStr = throughputHighestResults[i].Error diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index 498f7b2fb..927571b3d 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -675,7 +675,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http versions := make([]FileInfoVersions, totalVersions) decoder := msgpNewReader(r.Body) defer readMsgpReaderPoolPut(decoder) - for i := 0; i < totalVersions; i++ { + for i := range totalVersions { dst := &versions[i] if err := dst.DecodeMsg(decoder); err != nil { s.writeErrorResponse(w, err) diff --git a/cmd/utils.go b/cmd/utils.go index 90ce955a9..833f4ac87 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -851,7 +851,7 @@ func lcp(strs []string, pre bool) string { // compare letters if pre { // prefix, iterate left to right - for i := 0; i < maxl; i++ { + for i := range maxl { if xfix[i] != str[i] { xfix = xfix[:i] break @@ -859,7 +859,7 @@ func lcp(strs []string, pre bool) string { } } else { // suffix, iterate right to left - for i := 0; i < maxl; i++ { + for i := range maxl { xi := xfixl - i - 1 si := strl - i - 1 if xfix[xi] != str[si] { diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go index 74f8ea91f..128f1c096 100644 --- a/cmd/xl-storage-format-v2.go +++ b/cmd/xl-storage-format-v2.go @@ -846,7 +846,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte, // Any non-nil error is returned. 
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) { var tHdr, tMeta []byte // Zero copy bytes - for i := 0; i < versions; i++ { + for i := range versions { tHdr, buf, err = msgp.ReadBytesZC(buf) if err != nil { return err diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go index ab58bb12c..04ed6ccbb 100644 --- a/internal/dsync/drwmutex.go +++ b/internal/dsync/drwmutex.go @@ -381,7 +381,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int) lockNotFound, lockRefreshed := 0, 0 done := false - for i := 0; i < len(restClnts); i++ { + for range len(restClnts) { select { case refreshResult := <-ch: if refreshResult.offline { diff --git a/internal/event/targetlist.go b/internal/event/targetlist.go index 3ebe6f0fe..28eff2b18 100644 --- a/internal/event/targetlist.go +++ b/internal/event/targetlist.go @@ -357,7 +357,7 @@ func (list *TargetList) startSendWorkers(workerCount int) { if err != nil { panic(err) } - for i := 0; i < workerCount; i++ { + for range workerCount { wk.Take() go func() { defer wk.Give() diff --git a/internal/grid/connection.go b/internal/grid/connection.go index a193b0e79..522568278 100644 --- a/internal/grid/connection.go +++ b/internal/grid/connection.go @@ -1041,7 +1041,7 @@ func (c *Connection) readStream(ctx context.Context, conn net.Conn, cancel conte // Handle merged messages. messages := int(m.Seq) c.inMessages.Add(int64(messages)) - for i := 0; i < messages; i++ { + for range messages { if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected { return } diff --git a/internal/grid/debug.go b/internal/grid/debug.go index a6b3e2606..6278eb754 100644 --- a/internal/grid/debug.go +++ b/internal/grid/debug.go @@ -143,7 +143,7 @@ func (t *TestGrid) WaitAllConnect(ctx context.Context) { } func getHosts(n int) (hosts []string, listeners []net.Listener, err error) { - for i := 0; i < n; i++ { + for range n { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { diff --git a/internal/grid/muxclient.go b/internal/grid/muxclient.go index 5ec8fb347..0f9ca9de2 100644 --- a/internal/grid/muxclient.go +++ b/internal/grid/muxclient.go @@ -574,7 +574,7 @@ func (m *muxClient) ack(seq uint32) { return } available := cap(m.outBlock) - for i := 0; i < available; i++ { + for range available { m.outBlock <- struct{}{} } m.acked = true diff --git a/internal/grid/muxserver.go b/internal/grid/muxserver.go index 06a3f1f74..61707d5e7 100644 --- a/internal/grid/muxserver.go +++ b/internal/grid/muxserver.go @@ -130,7 +130,7 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea // Fill outbound block. // Each token represents a message that can be sent to the client without blocking. // The client will refill the tokens as they confirm delivery of the messages. 
- for i := 0; i < outboundCap; i++ { + for range outboundCap { m.outBlock <- struct{}{} } diff --git a/internal/s3select/csv/reader.go b/internal/s3select/csv/reader.go index 9d5f11c1e..661109830 100644 --- a/internal/s3select/csv/reader.go +++ b/internal/s3select/csv/reader.go @@ -230,7 +230,7 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { }() // Start parsers - for i := 0; i < runtime.GOMAXPROCS(0); i++ { + for range runtime.GOMAXPROCS(0) { go func() { for in := range r.input { if len(in.input) == 0 { diff --git a/internal/s3select/json/preader.go b/internal/s3select/json/preader.go index 9ceef5d6b..a9ef13435 100644 --- a/internal/s3select/json/preader.go +++ b/internal/s3select/json/preader.go @@ -173,7 +173,7 @@ func (r *PReader) startReaders() { }() // Start parsers - for i := 0; i < runtime.GOMAXPROCS(0); i++ { + for range runtime.GOMAXPROCS(0) { go func() { for in := range r.input { if len(in.input) == 0 { diff --git a/internal/s3select/jstream/decoder.go b/internal/s3select/jstream/decoder.go index abd2c5d51..c2bfd3f09 100644 --- a/internal/s3select/jstream/decoder.go +++ b/internal/s3select/jstream/decoder.go @@ -332,7 +332,7 @@ func (d *Decoder) u4() rune { // logic taken from: // github.com/buger/jsonparser/blob/master/escape.go#L20 var h [4]int - for i := 0; i < 4; i++ { + for i := range 4 { c := d.next() switch { case c >= '0' && c <= '9': From b4b3d208dd7dad1ac67ce662412b89b0d70d68b6 Mon Sep 17 00:00:00 2001 From: Shubhendu Date: Thu, 5 Jun 2025 02:15:31 +0530 Subject: [PATCH 4/7] Add `targetArn` label for bucket replication metrics (#21354) Signed-off-by: Shubhendu Ram Tripathi --- cmd/metrics-v3-bucket-replication.go | 38 ++++++++++++++-------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/cmd/metrics-v3-bucket-replication.go b/cmd/metrics-v3-bucket-replication.go index ef341801a..5f16d32e6 100644 --- a/cmd/metrics-v3-bucket-replication.go +++ b/cmd/metrics-v3-bucket-replication.go @@ -49,61 +49,61 @@ const ( var ( bucketReplLastHrFailedBytesMD = NewGaugeMD(bucketReplLastHrFailedBytes, "Total number of bytes failed at least once to replicate in the last hour on a bucket", - bucketL) + bucketL, targetArnL) bucketReplLastHrFailedCountMD = NewGaugeMD(bucketReplLastHrFailedCount, "Total number of objects which failed replication in the last hour on a bucket", - bucketL) + bucketL, targetArnL) bucketReplLastMinFailedBytesMD = NewGaugeMD(bucketReplLastMinFailedBytes, "Total number of bytes failed at least once to replicate in the last full minute on a bucket", - bucketL) + bucketL, targetArnL) bucketReplLastMinFailedCountMD = NewGaugeMD(bucketReplLastMinFailedCount, "Total number of objects which failed replication in the last full minute on a bucket", - bucketL) + bucketL, targetArnL) bucketReplLatencyMsMD = NewGaugeMD(bucketReplLatencyMs, "Replication latency on a bucket in milliseconds", bucketL, operationL, rangeL, targetArnL) bucketReplProxiedDeleteTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsTotal, "Number of DELETE tagging requests proxied to replication target", - bucketL) + bucketL, targetArnL) bucketReplProxiedGetRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetRequestsFailures, "Number of failures in GET requests proxied to replication target", - bucketL) + bucketL, targetArnL) bucketReplProxiedGetRequestsTotalMD = NewCounterMD(bucketReplProxiedGetRequestsTotal, "Number of GET requests proxied to replication target", - bucketL) + bucketL, targetArnL) 
 	bucketReplProxiedGetTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsFailures,
 		"Number of failures in GET tagging requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplProxiedGetTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsTotal,
 		"Number of GET tagging requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplProxiedHeadRequestsFailuresMD = NewCounterMD(bucketReplProxiedHeadRequestsFailures,
 		"Number of failures in HEAD requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplProxiedHeadRequestsTotalMD = NewCounterMD(bucketReplProxiedHeadRequestsTotal,
 		"Number of HEAD requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplProxiedPutTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsFailures,
 		"Number of failures in PUT tagging requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplProxiedPutTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsTotal,
 		"Number of PUT tagging requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplSentBytesMD = NewCounterMD(bucketReplSentBytes,
 		"Total number of bytes replicated to the target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplSentCountMD = NewCounterMD(bucketReplSentCount,
 		"Total number of objects replicated to the target",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplTotalFailedBytesMD = NewCounterMD(bucketReplTotalFailedBytes,
 		"Total number of bytes failed at least once to replicate since server start",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplTotalFailedCountMD = NewCounterMD(bucketReplTotalFailedCount,
 		"Total number of objects which failed replication since server start",
-		bucketL)
+		bucketL, targetArnL)
 	bucketReplProxiedDeleteTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsFailures,
 		"Number of failures in DELETE tagging requests proxied to replication target",
-		bucketL)
+		bucketL, targetArnL)
 )
 
 // loadBucketReplicationMetrics - `BucketMetricsLoaderFn` for bucket replication metrics
@@ -121,11 +121,11 @@ func loadBucketReplicationMetrics(ctx context.Context, m MetricValues, c *metric
 	bucketReplStats := globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage)
 	for _, bucket := range buckets {
-		labels := []string{bucketL, bucket}
 		if s, ok := bucketReplStats[bucket]; ok {
 			stats := s.ReplicationStats
 			if stats.hasReplicationUsage() {
 				for arn, stat := range stats.Stats {
+					labels := []string{bucketL, bucket, targetArnL, arn}
 					m.Set(bucketReplLastHrFailedBytes, float64(stat.Failed.LastHour.Bytes), labels...)
 					m.Set(bucketReplLastHrFailedCount, float64(stat.Failed.LastHour.Count), labels...)
 					m.Set(bucketReplLastMinFailedBytes, float64(stat.Failed.LastMinute.Bytes), labels...)
 					m.Set(bucketReplLastMinFailedCount, float64(stat.Failed.LastMinute.Count), labels...)

From e2245a0b12e3dc29b8becdaca78419fde3b97387 Mon Sep 17 00:00:00 2001
From: ffgan <114909534+ffgan@users.noreply.github.com>
Date: Mon, 9 Jun 2025 00:12:05 +0800
Subject: [PATCH 5/7] add cross-compiling support for RISC-V 64 (#21348)

This is a minor PR that adds build support for RISC-V 64; it covers
compilation only. There is no guarantee that the code has been tested
or that it will work in production.
--- buildscripts/checkdeps.sh | 4 ++-- buildscripts/cross-compile.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/buildscripts/checkdeps.sh b/buildscripts/checkdeps.sh index 11ecc4db0..ed4f666ea 100755 --- a/buildscripts/checkdeps.sh +++ b/buildscripts/checkdeps.sh @@ -74,11 +74,11 @@ check_minimum_version() { assert_is_supported_arch() { case "${ARCH}" in - x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64) + x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64) return ;; *) - echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]" + echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]" exit 1 ;; esac diff --git a/buildscripts/cross-compile.sh b/buildscripts/cross-compile.sh index 691891bae..0590aebb9 100755 --- a/buildscripts/cross-compile.sh +++ b/buildscripts/cross-compile.sh @@ -9,7 +9,7 @@ function _init() { export CGO_ENABLED=0 ## List of architectures and OS to test coss compilation. - SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64" + SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64" } function _build() { From 417c8648f0ae0676ff8e1a9d1ab83ec61d2f92c2 Mon Sep 17 00:00:00 2001 From: Sung Jeon Date: Mon, 9 Jun 2025 01:13:30 +0900 Subject: [PATCH 6/7] use provided region in tier configuration for S3 backend (#21365) fixes #21364 --- cmd/warm-backend-s3.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/warm-backend-s3.go b/cmd/warm-backend-s3.go index f46b88fb6..5905923b7 100644 --- a/cmd/warm-backend-s3.go +++ b/cmd/warm-backend-s3.go @@ -163,6 +163,7 @@ func newWarmBackendS3(conf madmin.TierS3, tier string) (*warmBackendS3, error) { Creds: creds, Secure: u.Scheme == "https", Transport: globalRemoteTargetTransport, + Region: conf.Region, } client, err := minio.New(u.Host, opts) if err != nil { From 21409f112dc299966a3beca89ead13f5045ecc33 Mon Sep 17 00:00:00 2001 From: Johannes Horn <117528519+hornjo@users.noreply.github.com> Date: Sun, 8 Jun 2025 18:14:18 +0200 Subject: [PATCH 7/7] add networkpolicy for job and add possibility to define egress ports (#20951) --- helm/minio/templates/networkpolicy.yaml | 40 ++++++++++++++++++ helm/minio/values.yaml | 54 ++++++++++++++++++++----- 2 files changed, 84 insertions(+), 10 deletions(-) diff --git a/helm/minio/templates/networkpolicy.yaml b/helm/minio/templates/networkpolicy.yaml index b9c077171..bb45a6c63 100644 --- a/helm/minio/templates/networkpolicy.yaml +++ b/helm/minio/templates/networkpolicy.yaml @@ -16,11 +16,51 @@ spec: ingress: - ports: - port: {{ .Values.minioAPIPort }} + protocol: TCP - port: {{ .Values.minioConsolePort }} + protocol: TCP {{- if not .Values.networkPolicy.allowExternal }} from: - podSelector: matchLabels: {{ template "minio.name" . }}-client: "true" {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + egress: + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} +--- +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . 
}} +metadata: + name: {{ template "minio.fullname" . }}-post-job + labels: + app: {{ template "minio.name" . }}-post-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} + egress: + - ports: + - port: {{ .Values.minioAPIPort }} + protocol: TCP + - port: {{ .Values.minioConsolePort }} + protocol: TCP + {{- if .Values.networkPolicy.egress.enabled }} + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} {{- end }} diff --git a/helm/minio/values.yaml b/helm/minio/values.yaml index 4c9714ea9..82b3dd8bb 100644 --- a/helm/minio/values.yaml +++ b/helm/minio/values.yaml @@ -200,9 +200,11 @@ service: ingress: enabled: false ingressClassName: ~ - labels: {} + labels: + {} # node-role.kubernetes.io/ingress: platform - annotations: {} + annotations: + {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # kubernetes.io/ingress.allow-http: "false" @@ -241,9 +243,11 @@ consoleService: consoleIngress: enabled: false ingressClassName: ~ - labels: {} + labels: + {} # node-role.kubernetes.io/ingress: platform - annotations: {} + annotations: + {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # kubernetes.io/ingress.allow-http: "false" @@ -391,7 +395,8 @@ makeUserJob: ## List of service accounts to be created after minio install ## -svcaccts: [] +svcaccts: + [] ## accessKey, secretKey and parent user to be assigned to the service accounts ## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts # - accessKey: console-svcacct @@ -430,7 +435,8 @@ makeServiceAccountJob: ## List of buckets to be created after minio install ## -buckets: [] +buckets: + [] # # Name of the bucket # - name: bucket1 # # Policy to be set on the @@ -479,13 +485,15 @@ customCommandJob: requests: memory: 128Mi ## Additional volumes to add to the post-job. - extraVolumes: [] + extraVolumes: + [] # - name: extra-policies # configMap: # name: my-extra-policies-cm ## Additional volumeMounts to add to the custom commands container when ## running the post-job. - extraVolumeMounts: [] + extraVolumeMounts: + [] # - name: extra-policies # mountPath: /mnt/extras/ # Command to run after the main command on exit @@ -542,10 +550,35 @@ networkPolicy: # Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) # or Cilium Network Policies (flavor: cilium) flavor: kubernetes + # allows external access to the minio api allowExternal: true + ## @params networkPolicy.egress configuration of the egress traffic + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing minio to connect to external data sources from kubernetes cluster. + ## + enabled: false + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## - port: 53 + ## protocol: UDP + ## + ports: [] + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. 
+ ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [minio]} + ## + to: [] # only when using flavor: cilium egressEntities: - - kube-apiserver + - kube-apiserver ## PodDisruptionBudget settings ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ @@ -573,7 +606,8 @@ metrics: # for node metrics relabelConfigs: {} # for cluster metrics - relabelConfigsCluster: {} + relabelConfigsCluster: + {} # metricRelabelings: # - regex: (server|pod) # action: labeldrop
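
Notes on the series follow, each with a small illustrative sketch in Go.

On PATCH 1/7: the new guard rejects post-policy uploads that never carried a
file part, instead of letting a nil reader reach the upload path. Below is a
minimal reproduction of that request shape using only the standard library;
this is a sketch, not MinIO's handler code, and the form-field names are
illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
)

func main() {
	// Build a POST-policy style multipart body that carries only form
	// fields and no "file" part, i.e. the request shape that used to
	// leave the reader nil in PostPolicyBucketHandler.
	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	mw.WriteField("key", "uploads/object.txt")
	mw.WriteField("policy", "eyJleHBpcmF0aW9uIjoiLi4uIn0=") // dummy value
	mw.Close()

	var fileReader io.Reader
	mr := multipart.NewReader(&buf, mw.Boundary())
	for {
		part, err := mr.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if part.FormName() == "file" {
			fileReader = part
			break
		}
		io.Copy(io.Discard, part) // drain ordinary form fields
	}

	// The guard the patch adds, in spirit: fail fast with a clear error
	// rather than dereferencing a nil reader later.
	if fileReader == nil {
		fmt.Println("malformed POST request: the file or text content is missing")
		return
	}
	io.Copy(io.Discard, fileReader)
}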
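On PATCH 2/7: the test previously replaced the whole BucketMetadata with a
fresh struct literal, wiping every other field, and ran without the expiry
workers initialized. The fix switches to a read-modify-write. A toy sketch of
the difference; the types here are illustrative, not MinIO's:

package main

import "fmt"

type bucketMeta struct {
	lifecycle  string
	encryption string
}

func main() {
	store := map[string]bucketMeta{
		"testbucket": {encryption: "aes256"},
	}

	// Overwrite: a fresh literal silently drops the encryption setting.
	store["testbucket"] = bucketMeta{lifecycle: "expire-7d"}
	fmt.Printf("overwrite:         %+v\n", store["testbucket"])

	// Read-modify-write: fetch the current value, change one field,
	// store it back; unrelated fields survive.
	store["testbucket"] = bucketMeta{encryption: "aes256"} // reset for the demo
	m := store["testbucket"]
	m.lifecycle = "expire-7d"
	store["testbucket"] = m
	fmt.Printf("read-modify-write: %+v\n", store["testbucket"])
}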
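On PATCH 3/7: the mechanical change throughout is Go 1.22's range-over-int
form. The three loop shapes involved, side by side; behavior is unchanged,
and when the body never reads the index the variable disappears entirely:

package main

import "fmt"

func main() {
	const n = 3

	// Pre-Go 1.22 counted loop:
	for i := 0; i < n; i++ {
		fmt.Println("classic", i)
	}

	// Go 1.22 range-over-int, when the index is still needed:
	for i := range n {
		fmt.Println("range", i)
	}

	// When only the iteration count matters:
	for range n {
		fmt.Println("tick")
	}
}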
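On PATCH 4/7: moving the label construction inside the per-ARN loop is what
makes the new label useful; before, every replication target of a bucket
collapsed into a single time series. A generic client_golang sketch of the
effect (this is not MinIO's internal metrics plumbing, and the metric and
label names are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

// With "targetArn" in the label set, per-target series stay distinct
// instead of overwriting one another.
var replSentBytes = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "minio_bucket_replication_sent_bytes",
		Help: "Total number of bytes replicated to the target",
	},
	[]string{"bucket", "targetArn"},
)

func main() {
	prometheus.MustRegister(replSentBytes)

	// Two targets on the same bucket now produce two series:
	replSentBytes.WithLabelValues("photos", "arn:minio:replication::target-a:photos").Add(1024)
	replSentBytes.WithLabelValues("photos", "arn:minio:replication::target-b:photos").Add(2048)
}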
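On PATCH 6/7: when Options.Region is left empty, minio-go probes the backend
for the bucket location, which some S3-compatible tiers answer incorrectly or
not at all; passing the configured region through skips that lookup. A sketch
of the client construction against the public minio-go v7 API; endpoint,
credentials and region are placeholders:

package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Region mirrors the one-line fix in newWarmBackendS3: the tier
	// client signs requests for the region the operator configured
	// instead of discovering it per bucket.
	client, err := minio.New("s3.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESSKEY", "SECRETKEY", ""),
		Secure: true,
		Region: "ap-northeast-2",
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("tier client ready:", client.EndpointURL())
}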