Merge branch 'master' into fix-SinglePool-is-not-restricted-by-the-minimum-free-drive-threshold

jiuker 2025-06-09 15:13:40 +08:00 committed by GitHub
commit 4e08f3596f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 172 additions and 78 deletions

View File

@ -74,11 +74,11 @@ check_minimum_version() {
assert_is_supported_arch() {
case "${ARCH}" in
-x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
+x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
return
;;
*)
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
exit 1
;;
esac

View File

@ -9,7 +9,7 @@ function _init() {
export CGO_ENABLED=0
## List of architectures and OS to test cross compilation.
-SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
+SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
}
function _build() {
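Both riscv64 additions above (the arch check and this cross-compile list) track upstream Go, which ships a linux/riscv64 port. A minimal sketch of building for the new target; the output name is arbitrary:

```sh
# Cross-compile a static binary for the newly allowed target.
# `go tool dist list` shows whether the installed toolchain has the port.
CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -o minio-linux-riscv64 .
```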

View File

@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
// - Force encoding of '~'
func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0
-for i := 0; i < len(s); i++ {
+for i := range len(s) {
c := s[i]
if shouldEscape(c) {
if c == ' ' {
@ -70,7 +70,7 @@ func s3URLEncode(s string) string {
if hexCount == 0 {
copy(t, s)
-for i := 0; i < len(s); i++ {
+for i := range len(s) {
if s[i] == ' ' {
t[i] = '+'
}
@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
}
j := 0
-for i := 0; i < len(s); i++ {
+for i := range len(s) {
switch c := s[i]; {
case c == ' ':
t[j] = '+'
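Most hunks in this commit are the same mechanical modernization: Go 1.22's range-over-int form replaces the classic three-clause counter loop. A standalone sketch of the two variants the commit uses:

```go
package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an integer n yields i = 0, 1, ..., n-1.
	for i := range 3 {
		fmt.Println("index:", i)
	}
	// When the index is unused, the variable is dropped entirely,
	// as in the worker-spawning loops later in this commit.
	for range 3 {
		fmt.Println("tick")
	}
}
```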

View File

@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
-for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
+for range globalBackgroundHealRoutine.workers {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}

View File

@ -248,7 +248,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
pInfo PartInfo
)
-for i := 0; i < partsCount; i++ {
+for i := range partsCount {
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
PartNumber: i + 1,

View File

@ -1089,6 +1089,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
break
}
// check if have a file
+if reader == nil {
+apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
+apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
+writeErrorResponse(ctx, w, apiErr, r.URL)
+return
+}
if keyName, ok := formValues["Key"]; !ok {
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
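The new guard rejects POST policy uploads that never carry a file part, which previously left reader as nil. A hedged sketch of such a malformed request; the helper name, endpoint, and policy value are hypothetical:

```go
package main

import (
	"bytes"
	"mime/multipart"
	"net/http"
)

// buildFormWithoutFile assembles a browser-style POST policy upload that
// omits the file content itself, the case the nil-reader check catches.
func buildFormWithoutFile(endpoint, policy string) (*http.Request, error) {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	w.WriteField("key", "uploads/example.txt")
	w.WriteField("policy", policy) // base64-encoded policy document
	w.Close()                      // no "file" field was ever written
	req, err := http.NewRequest(http.MethodPost, endpoint, &body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	return req, nil
}

func main() {
	// Sending this now yields ErrMalformedPOSTRequest instead of failing later.
	req, _ := buildFormWithoutFile("http://localhost:9000/mybucket", "PLACEHOLDER")
	_ = req
}
```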

View File

@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
sys.RUnlock()
// send last n console log messages in order filtered by node
if cnt > 0 {
-for i := 0; i < last; i++ {
+for i := range last {
entry := lastN[(cnt+i)%last]
if (entry == log.Info{}) {
continue
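The modulo arithmetic above replays a ring buffer oldest-first: writes land at cnt modulo the buffer size, so reading (cnt+i) modulo size for i = 0..size-1 walks entries in arrival order once the buffer has wrapped. A small sketch:

```go
package main

import "fmt"

func main() {
	const size = 4
	buf := make([]string, size)
	cnt := 0
	// Writer side: each entry lands at cnt % size, overwriting the oldest.
	for _, msg := range []string{"a", "b", "c", "d", "e", "f"} {
		buf[cnt%size] = msg
		cnt++
	}
	// Reader side: (cnt+i) % size starts at the oldest surviving entry.
	for i := range size {
		fmt.Print(buf[(cnt+i)%size], " ") // prints: c d e f
	}
}
```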

View File

@ -1481,7 +1481,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
}
}
-for i := 0; i < len(onlineDisks); i++ {
+for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk

View File

@ -504,7 +504,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
// count the number of offline disks
offline := 0
-for i := 0; i < len(errs); i++ {
+for i := range len(errs) {
var found bool
switch {
case errors.Is(errs[i], errDiskNotFound):
@ -1221,7 +1221,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
partsMetadata[index].SetInlineData()
}
-for i := 0; i < len(onlineDisks); i++ {
+for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
@ -1557,7 +1557,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
-for i := 0; i < len(onlineDisks); i++ {
+for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
// Object info is the same in all disks, so we can pick
// the first meta from online disk
@ -1574,7 +1574,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
if len(versions) == 0 {
// Whether a disk was initially or becomes offline
// during this upload, send it to the MRF list.
-for i := 0; i < len(onlineDisks); i++ {
+for i := range len(onlineDisks) {
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
continue
}

View File

@ -149,7 +149,7 @@ func (z *erasureServerPools) findIndex(index int) int {
if z.rebalMeta == nil {
return 0
}
-for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
+for i := range len(z.rebalMeta.PoolStats) {
if i == index {
return index
}

View File

@ -95,7 +95,7 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI {
s.erasureDisksMu.RLock()
defer s.erasureDisksMu.RUnlock()
-for i := 0; i < s.setCount; i++ {
+for i := range s.setCount {
for j := 0; j < s.setDriveCount; j++ {
disk := s.erasureDisks[i][j]
if disk == OfflineDisk {
@ -150,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
if diskID == offlineDiskUUID {
return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID)
}
-for i := 0; i < len(refFormat.Erasure.Sets); i++ {
+for i := range len(refFormat.Erasure.Sets) {
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
if refFormat.Erasure.Sets[i][j] == diskID {
return i, j, nil
@ -174,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This)
}
-for i := 0; i < len(refFormat.Erasure.Sets); i++ {
+for i := range len(refFormat.Erasure.Sets) {
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
if refFormat.Erasure.Sets[i][j] == format.Erasure.This {
return i, j, nil
@ -377,7 +377,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
mutex := newNSLock(globalIsDistErasure)
-for i := 0; i < setCount; i++ {
+for i := range setCount {
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
}
@ -390,7 +390,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
var wg sync.WaitGroup
var lk sync.Mutex
-for i := 0; i < setCount; i++ {
+for i := range setCount {
lockerEpSet := set.NewStringSet()
for j := 0; j < setDriveCount; j++ {
wg.Add(1)
@ -409,7 +409,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
}
wg.Wait()
-for i := 0; i < setCount; i++ {
+for i := range setCount {
wg.Add(1)
go func(i int) {
defer wg.Done()

View File

@ -98,7 +98,7 @@ func fmtGenMain(ctxt *cli.Context) {
setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet
format := newFormatErasureV3(setCount, setDriveCount)
format.ID = deploymentID
-for i := 0; i < setCount; i++ { // for each erasure set
+for i := range setCount { // for each erasure set
for j := 0; j < setDriveCount; j++ {
newFormat := format.Clone()
newFormat.Erasure.This = format.Erasure.Sets[i][j]

View File

@ -157,7 +157,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3
format.Erasure.Sets = make([][]string, numSets)
-for i := 0; i < numSets; i++ {
+for i := range numSets {
format.Erasure.Sets[i] = make([]string, setLen)
for j := 0; j < setLen; j++ {
format.Erasure.Sets[i][j] = mustGetUUID()
@ -514,7 +514,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
}
// Make sure that the diskID is found in the set.
-for i := 0; i < len(tmpFormat.Erasure.Sets); i++ {
+for i := range len(tmpFormat.Erasure.Sets) {
for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ {
if this == tmpFormat.Erasure.Sets[i][j] {
return nil
@ -639,7 +639,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
return nil, err
}
-for i := 0; i < setCount; i++ {
+for i := range setCount {
hostCount := make(map[string]int, setDriveCount)
for j := 0; j < setDriveCount; j++ {
disk := storageDisks[i*setDriveCount+j]

View File

@ -49,61 +49,61 @@ const (
var (
bucketReplLastHrFailedBytesMD = NewGaugeMD(bucketReplLastHrFailedBytes,
"Total number of bytes failed at least once to replicate in the last hour on a bucket",
-bucketL)
+bucketL, targetArnL)
bucketReplLastHrFailedCountMD = NewGaugeMD(bucketReplLastHrFailedCount,
"Total number of objects which failed replication in the last hour on a bucket",
-bucketL)
+bucketL, targetArnL)
bucketReplLastMinFailedBytesMD = NewGaugeMD(bucketReplLastMinFailedBytes,
"Total number of bytes failed at least once to replicate in the last full minute on a bucket",
-bucketL)
+bucketL, targetArnL)
bucketReplLastMinFailedCountMD = NewGaugeMD(bucketReplLastMinFailedCount,
"Total number of objects which failed replication in the last full minute on a bucket",
-bucketL)
+bucketL, targetArnL)
bucketReplLatencyMsMD = NewGaugeMD(bucketReplLatencyMs,
"Replication latency on a bucket in milliseconds",
bucketL, operationL, rangeL, targetArnL)
bucketReplProxiedDeleteTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsTotal,
"Number of DELETE tagging requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedGetRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetRequestsFailures,
"Number of failures in GET requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedGetRequestsTotalMD = NewCounterMD(bucketReplProxiedGetRequestsTotal,
"Number of GET requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedGetTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsFailures,
"Number of failures in GET tagging requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedGetTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsTotal,
"Number of GET tagging requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedHeadRequestsFailuresMD = NewCounterMD(bucketReplProxiedHeadRequestsFailures,
"Number of failures in HEAD requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedHeadRequestsTotalMD = NewCounterMD(bucketReplProxiedHeadRequestsTotal,
"Number of HEAD requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedPutTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsFailures,
"Number of failures in PUT tagging requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedPutTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsTotal,
"Number of PUT tagging requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
bucketReplSentBytesMD = NewCounterMD(bucketReplSentBytes,
"Total number of bytes replicated to the target",
-bucketL)
+bucketL, targetArnL)
bucketReplSentCountMD = NewCounterMD(bucketReplSentCount,
"Total number of objects replicated to the target",
-bucketL)
+bucketL, targetArnL)
bucketReplTotalFailedBytesMD = NewCounterMD(bucketReplTotalFailedBytes,
"Total number of bytes failed at least once to replicate since server start",
-bucketL)
+bucketL, targetArnL)
bucketReplTotalFailedCountMD = NewCounterMD(bucketReplTotalFailedCount,
"Total number of objects which failed replication since server start",
-bucketL)
+bucketL, targetArnL)
bucketReplProxiedDeleteTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsFailures,
"Number of failures in DELETE tagging requests proxied to replication target",
-bucketL)
+bucketL, targetArnL)
)
// loadBucketReplicationMetrics - `BucketMetricsLoaderFn` for bucket replication metrics
@ -121,11 +121,11 @@ func loadBucketReplicationMetrics(ctx context.Context, m MetricValues, c *metric
bucketReplStats := globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage)
for _, bucket := range buckets {
-labels := []string{bucketL, bucket}
if s, ok := bucketReplStats[bucket]; ok {
stats := s.ReplicationStats
if stats.hasReplicationUsage() {
+for arn, stat := range stats.Stats {
+labels := []string{bucketL, bucket, targetArnL, arn}
+m.Set(bucketReplLastHrFailedBytes, float64(stat.Failed.LastHour.Bytes), labels...)
+m.Set(bucketReplLastHrFailedCount, float64(stat.Failed.LastHour.Count), labels...)
+m.Set(bucketReplLastMinFailedBytes, float64(stat.Failed.LastMinute.Bytes), labels...)
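Every metric above gains a targetArn label, so replication health can now be queried per remote target rather than per bucket only. A hedged PromQL sketch; the exported metric name is an assumption based on the identifier above:

```promql
# Assumed exposition name; label set per the change above.
sum by (bucket, targetArn) (minio_bucket_replication_sent_bytes)
```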

View File

@ -266,7 +266,7 @@ func (m *mrfState) healRoutine(z *erasureServerPools) {
if len(u.Versions) > 0 {
vers := len(u.Versions) / 16
if vers > 0 {
-for i := 0; i < vers; i++ {
+for i := range vers {
healObject(u.Bucket, u.Object, uuid.UUID(u.Versions[16*i:]).String(), scan)
}
}
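The heal loop above treats u.Versions as tightly packed 16-byte version IDs and slices out each UUID with a Go 1.20 slice-to-array conversion. A sketch of that layout, assuming github.com/google/uuid as the uuid package:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Pack two version IDs into one contiguous byte slice.
	ids := []uuid.UUID{uuid.New(), uuid.New()}
	var packed []byte
	for _, id := range ids {
		packed = append(packed, id[:]...)
	}
	// Unpack: every 16 bytes is one UUID; the slice-to-array conversion
	// (Go 1.20+) copies exactly len(uuid.UUID) bytes from the slice head.
	vers := len(packed) / 16
	for i := range vers {
		fmt.Println(uuid.UUID(packed[16*i:]).String())
	}
}
```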

View File

@ -123,7 +123,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
}
retryCount := g.retryCount
-for i := 0; i < retryCount; i++ {
+for i := range retryCount {
g.errs[index].Err = nil
if err := f(); err != nil {
g.errs[index].Err = err
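The group retries each notification function up to retryCount times and keeps only the last error. A standalone sketch of that retry shape (not the MinIO API; the real loop also clears the error slot and honors context between attempts):

```go
package main

import (
	"errors"
	"fmt"
)

// retry attempts f up to n times and reports the last error seen.
func retry(n int, f func() error) (err error) {
	for range n {
		if err = f(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	attempts := 0
	err := retry(3, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```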

View File

@ -1938,6 +1938,10 @@ func TestListObjectsWithILM(t *testing.T) {
}
func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
+// Prepare lifecycle expiration workers
+es := newExpiryState(t1.Context(), obj, 0)
+globalExpiryState = es
t, _ := t1.(*testing.T)
objContent := "test-content"
@ -1977,7 +1981,12 @@ func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHand
t.Fatalf("%s : %s", instanceType, err.Error())
}
-globalBucketMetadataSys.Set(upload.bucket, BucketMetadata{lifecycleConfig: lifecycleConfig})
+metadata, err := globalBucketMetadataSys.Get(upload.bucket)
+if err != nil {
+t.Fatal(err)
+}
+metadata.lifecycleConfig = lifecycleConfig
+globalBucketMetadataSys.Set(upload.bucket, metadata)
defer globalBucketMetadataSys.Remove(upload.bucket)
// Upload objects which modtime as one week ago, supposed to be expired by ILM

View File

@ -128,7 +128,7 @@ func IsValidBucketName(bucket string) bool {
// 'label' in AWS terminology and if the bucket looks
// like an IP address.
isNotNumber := false
-for i := 0; i < len(piece); i++ {
+for i := range len(piece) {
switch {
case (piece[i] >= 'a' && piece[i] <= 'z' ||
piece[i] == '-'):
@ -254,11 +254,11 @@ func concat(ss ...string) string {
}
// create & allocate the memory in advance.
n := 0
-for i := 0; i < length; i++ {
+for i := range length {
n += len(ss[i])
}
b := make([]byte, 0, n)
-for i := 0; i < length; i++ {
+for i := range length {
b = append(b, ss[i]...)
}
return unsafe.String(unsafe.SliceData(b), n)
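The last line of concat is the subtle one: unsafe.String hands the backing array of b to a string header with no copy, which is sound only because b is freshly allocated and never mutated afterwards. A self-contained sketch of the same pattern:

```go
package main

import (
	"fmt"
	"unsafe"
)

// join sizes the buffer once, appends every piece, then reinterprets the
// bytes as a string without copying. Safe because b never escapes or
// changes after the conversion.
func join(ss ...string) string {
	n := 0
	for i := range len(ss) {
		n += len(ss[i])
	}
	b := make([]byte, 0, n)
	for i := range len(ss) {
		b = append(b, ss[i]...)
	}
	return unsafe.String(unsafe.SliceData(b), n)
}

func main() {
	fmt.Println(join("min", "io")) // minio
}
```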

View File

@ -77,7 +77,7 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) {
func setupTestReadDirFiles(t *testing.T) (testResults []result) {
dir := t.TempDir()
entries := []string{}
-for i := 0; i < 10; i++ {
+for i := range 10 {
name := fmt.Sprintf("file-%d", i)
if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
// For cleanup, it's required to add these entries into test results.
@ -102,7 +102,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) {
t.Fatalf("Unable to create prefix directory \"mydir\", %s", err)
}
entries := []string{"mydir/"}
-for i := 0; i < 10; i++ {
+for i := range 10 {
name := fmt.Sprintf("file-%d", i)
if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
// For cleanup, it's required to add these entries into test results.
@ -126,7 +126,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) {
}
dir := t.TempDir()
entries := []string{}
-for i := 0; i < 10; i++ {
+for i := range 10 {
name1 := fmt.Sprintf("file-%d", i)
name2 := fmt.Sprintf("file-%d", i+10)
if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {

View File

@ -452,7 +452,9 @@ func initAllSubsystems(ctx context.Context) {
globalNotificationSys = NewNotificationSys(globalEndpoints)
// Create new notification system
-globalEventNotifier = NewEventNotifier(GlobalContext)
+if globalEventNotifier == nil {
+globalEventNotifier = NewEventNotifier(GlobalContext)
+}
// Create new bucket metadata system.
if globalBucketMetadataSys == nil {

View File

@ -102,7 +102,7 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT
var totalUploadTimes madmin.TimeDurations
var totalDownloadTimes madmin.TimeDurations
var totalDownloadTTFB madmin.TimeDurations
-for i := 0; i < len(throughputHighestResults); i++ {
+for i := range len(throughputHighestResults) {
errStr := ""
if throughputHighestResults[i].Error != "" {
errStr = throughputHighestResults[i].Error

View File

@ -675,7 +675,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
versions := make([]FileInfoVersions, totalVersions)
decoder := msgpNewReader(r.Body)
defer readMsgpReaderPoolPut(decoder)
-for i := 0; i < totalVersions; i++ {
+for i := range totalVersions {
dst := &versions[i]
if err := dst.DecodeMsg(decoder); err != nil {
s.writeErrorResponse(w, err)

View File

@ -851,7 +851,7 @@ func lcp(strs []string, pre bool) string {
// compare letters
if pre {
// prefix, iterate left to right
-for i := 0; i < maxl; i++ {
+for i := range maxl {
if xfix[i] != str[i] {
xfix = xfix[:i]
break
@ -859,7 +859,7 @@ func lcp(strs []string, pre bool) string {
}
} else {
// suffix, iterate right to left
-for i := 0; i < maxl; i++ {
+for i := range maxl {
xi := xfixl - i - 1
si := strl - i - 1
if xfix[xi] != str[si] {

View File

@ -163,6 +163,7 @@ func newWarmBackendS3(conf madmin.TierS3, tier string) (*warmBackendS3, error) {
Creds: creds,
Secure: u.Scheme == "https",
Transport: globalRemoteTargetTransport,
+Region: conf.Region,
}
client, err := minio.New(u.Host, opts)
if err != nil {
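Plumbing conf.Region into the minio-go Options lets the tier client skip the bucket-location probe against targets that demand a fixed region. A hedged sketch of constructing such a client; endpoint, credentials, and region are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// With Region set, minio-go signs against it directly instead of
	// issuing a GetBucketLocation round trip first.
	client, err := minio.New("s3.example.com", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESSKEY", "SECRETKEY", ""),
		Secure: true,
		Region: "us-east-2",
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```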

View File

@ -846,7 +846,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte,
// Any non-nil error is returned.
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
var tHdr, tMeta []byte // Zero copy bytes
-for i := 0; i < versions; i++ {
+for i := range versions {
tHdr, buf, err = msgp.ReadBytesZC(buf)
if err != nil {
return err

View File

@ -16,11 +16,51 @@ spec:
ingress:
- ports:
- port: {{ .Values.minioAPIPort }}
protocol: TCP
- port: {{ .Values.minioConsolePort }}
protocol: TCP
{{- if not .Values.networkPolicy.allowExternal }}
from:
- podSelector:
matchLabels:
{{ template "minio.name" . }}-client: "true"
{{- end }}
+{{- if .Values.networkPolicy.egress.enabled }}
+egress:
+- ports:
+{{ .Values.networkPolicy.egress.ports | toJson }}
+{{- with .Values.networkPolicy.egress.to }}
+to:
+{{- toYaml . | nindent 12 }}
+{{- end }}
+{{- end }}
+---
+kind: NetworkPolicy
+apiVersion: {{ template "minio.networkPolicy.apiVersion" . }}
+metadata:
+name: {{ template "minio.fullname" . }}-post-job
+labels:
+app: {{ template "minio.name" . }}-post-job
+chart: {{ template "minio.chart" . }}
+release: {{ .Release.Name }}
+heritage: {{ .Release.Service }}
+spec:
+podSelector:
+matchLabels:
+app: {{ template "minio.name" . }}-job
+release: {{ .Release.Name }}
+egress:
+- ports:
+- port: {{ .Values.minioAPIPort }}
+protocol: TCP
+- port: {{ .Values.minioConsolePort }}
+protocol: TCP
+{{- if .Values.networkPolicy.egress.enabled }}
+- ports:
+{{ .Values.networkPolicy.egress.ports | toJson }}
+{{- with .Values.networkPolicy.egress.to }}
+to:
+{{- toYaml . | nindent 12 }}
+{{- end }}
+{{- end }}
{{- end }}

View File

@ -200,9 +200,11 @@ service:
ingress:
enabled: false
ingressClassName: ~
-labels: {}
+labels:
+{}
# node-role.kubernetes.io/ingress: platform
-annotations: {}
+annotations:
+{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
@ -241,9 +243,11 @@ consoleService:
consoleIngress:
enabled: false
ingressClassName: ~
-labels: {}
+labels:
+{}
# node-role.kubernetes.io/ingress: platform
-annotations: {}
+annotations:
+{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# kubernetes.io/ingress.allow-http: "false"
@ -391,7 +395,8 @@ makeUserJob:
## List of service accounts to be created after minio install
##
-svcaccts: []
+svcaccts:
+[]
## accessKey, secretKey and parent user to be assigned to the service accounts
## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts
# - accessKey: console-svcacct
@ -430,7 +435,8 @@ makeServiceAccountJob:
## List of buckets to be created after minio install
##
-buckets: []
+buckets:
+[]
# # Name of the bucket
# - name: bucket1
# # Policy to be set on the
@ -479,13 +485,15 @@ customCommandJob:
requests:
memory: 128Mi
## Additional volumes to add to the post-job.
-extraVolumes: []
+extraVolumes:
+[]
# - name: extra-policies
# configMap:
# name: my-extra-policies-cm
## Additional volumeMounts to add to the custom commands container when
## running the post-job.
-extraVolumeMounts: []
+extraVolumeMounts:
+[]
# - name: extra-policies
# mountPath: /mnt/extras/
# Command to run after the main command on exit
@ -542,10 +550,35 @@ networkPolicy:
# Specifies whether the policies created will be standard Network Policies (flavor: kubernetes)
# or Cilium Network Policies (flavor: cilium)
flavor: kubernetes
+# allows external access to the minio api
allowExternal: true
+## @params networkPolicy.egress configuration of the egress traffic
+egress:
+## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
+## created allowing minio to connect to external data sources from kubernetes cluster.
+##
+enabled: false
+## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
+## Add ports to the egress by specifying - port: <port number>
+## E.X.
+## - port: 80
+## - port: 443
+## - port: 53
+## protocol: UDP
+##
+ports: []
+## @param networkPolicy.egress.to Allow egress traffic to specific destinations
+## Add destinations to the egress by specifying - ipBlock: <CIDR>
+## E.X.
+## to:
+## - namespaceSelector:
+## matchExpressions:
+## - {key: role, operator: In, values: [minio]}
+##
+to: []
# only when using flavor: cilium
egressEntities:
-- kube-apiserver
+- kube-apiserver
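Put together, the new keys let a chart user open specific egress routes for both the MinIO pods and the post-install job. A hedged values-file sketch; the CIDR and ports are placeholders:

```yaml
networkPolicy:
  enabled: true
  flavor: kubernetes
  allowExternal: false
  egress:
    enabled: true
    ports:
      - port: 443
      - port: 53
        protocol: UDP
    to:
      - ipBlock:
          cidr: 10.0.0.0/8
```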
## PodDisruptionBudget settings
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
@ -573,7 +606,8 @@ metrics:
# for node metrics
relabelConfigs: {}
# for cluster metrics
-relabelConfigsCluster: {}
+relabelConfigsCluster:
+{}
# metricRelabelings:
# - regex: (server|pod)
# action: labeldrop

View File

@ -381,7 +381,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int)
lockNotFound, lockRefreshed := 0, 0
done := false
-for i := 0; i < len(restClnts); i++ {
+for range len(restClnts) {
select {
case refreshResult := <-ch:
if refreshResult.offline {

View File

@ -357,7 +357,7 @@ func (list *TargetList) startSendWorkers(workerCount int) {
if err != nil {
panic(err)
}
-for i := 0; i < workerCount; i++ {
+for range workerCount {
wk.Take()
go func() {
defer wk.Give()

View File

@ -1041,7 +1041,7 @@ func (c *Connection) readStream(ctx context.Context, conn net.Conn, cancel conte
// Handle merged messages.
messages := int(m.Seq)
c.inMessages.Add(int64(messages))
-for i := 0; i < messages; i++ {
+for range messages {
if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected {
return
}

View File

@ -143,7 +143,7 @@ func (t *TestGrid) WaitAllConnect(ctx context.Context) {
}
func getHosts(n int) (hosts []string, listeners []net.Listener, err error) {
-for i := 0; i < n; i++ {
+for range n {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {

View File

@ -574,7 +574,7 @@ func (m *muxClient) ack(seq uint32) {
return
}
available := cap(m.outBlock)
-for i := 0; i < available; i++ {
+for range available {
m.outBlock <- struct{}{}
}
m.acked = true

View File

@ -130,7 +130,7 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea
// Fill outbound block.
// Each token represents a message that can be sent to the client without blocking.
// The client will refill the tokens as they confirm delivery of the messages.
-for i := 0; i < outboundCap; i++ {
+for range outboundCap {
m.outBlock <- struct{}{}
}
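Both grid hunks refill a buffered channel used as a credit window: one token per message the peer can accept without blocking; sends consume tokens and acks return them. A compact sketch:

```go
package main

import "fmt"

func main() {
	outBlock := make(chan struct{}, 4)
	// Fill the window: the peer starts ready to accept cap(outBlock) messages.
	for range cap(outBlock) {
		outBlock <- struct{}{}
	}
	send := func(msg string) {
		<-outBlock // blocks once the window is exhausted
		fmt.Println("sent:", msg)
	}
	send("hello")
	outBlock <- struct{}{} // a delivery ack returns one credit
}
```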

View File

@ -230,7 +230,7 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error {
}()
// Start parsers
-for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+for range runtime.GOMAXPROCS(0) {
go func() {
for in := range r.input {
if len(in.input) == 0 {

View File

@ -173,7 +173,7 @@ func (r *PReader) startReaders() {
}()
// Start parsers
-for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+for range runtime.GOMAXPROCS(0) {
go func() {
for in := range r.input {
if len(in.input) == 0 {
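Both reader hunks above size their parser pools with runtime.GOMAXPROCS(0), which reads the current setting without modifying it. A minimal worker-pool sketch in the same style:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	input := make(chan int)
	var wg sync.WaitGroup
	// GOMAXPROCS(0) is a pure query: passing 0 leaves the value unchanged.
	for range runtime.GOMAXPROCS(0) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for in := range input {
				fmt.Println("parsed chunk", in)
			}
		}()
	}
	for i := range 4 {
		input <- i
	}
	close(input)
	wg.Wait()
}
```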

View File

@ -332,7 +332,7 @@ func (d *Decoder) u4() rune {
// logic taken from:
// github.com/buger/jsonparser/blob/master/escape.go#L20
var h [4]int
-for i := 0; i < 4; i++ {
+for i := range 4 {
c := d.next()
switch {
case c >= '0' && c <= '9':