Compare commits

...

2 Commits

Author SHA1 Message Date
Manuel Reis e39f85e6bb
Merge branch 'master' into dev 2025-09-29 14:30:23 +02:00
Klaus Post b8631cf531
Use new gofumpt (#21613)
VulnCheck / Analysis (push) Has been cancelled Details
Update tinylib. Should fix CI.

`gofumpt -w .&&go generate ./...`
2025-09-28 13:59:21 -07:00
171 changed files with 881 additions and 899 deletions

View File

@ -193,27 +193,27 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) {
result.Cfg, err = readServerConfig(ctx, objectAPI, nil)
if err != nil {
return
return result, err
}
result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes))
if err != nil {
return
return result, err
}
result.SubSys, _, _, err = config.GetSubSys(string(kvBytes))
if err != nil {
return
return result, err
}
tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes))
if err != nil {
return
return result, err
}
ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts)
if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil {
err = badConfigErr{Err: verr}
return
return result, err
}
// Check if subnet proxy being set and if so set the same value to proxy of subnet
@ -222,12 +222,12 @@ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (re
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil {
return
return result, err
}
// Write the config input KV to history.
err = saveServerConfigHistory(ctx, objectAPI, kvBytes)
return
return result, err
}
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}

View File

@ -380,7 +380,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" {
return
return proxy
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Host == host && !proxyEp.IsLocal {
@ -389,5 +389,5 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
}
}
}
return
return proxy
}

View File

@ -70,7 +70,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) {
opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true"
return
return opts
}
// SRPeerJoin - PUT /minio/admin/v3/site-replication/join
@ -422,7 +422,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) {
opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true"
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true"
return
return opts
}
// SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit
@ -484,7 +484,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
opts.EntityValue = q.Get("entityvalue")
opts.ShowDeleted = q.Get("showDeleted") == "true"
opts.Metrics = q.Get("metrics") == "true"
return
return opts
}
// SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove

View File

@ -1243,17 +1243,17 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if hip.objPrefix != "" {
// Bucket is required if object-prefix is given
err = ErrHealMissingBucket
return
return hip, err
}
} else if isReservedOrInvalidBucket(hip.bucket, false) {
err = ErrInvalidBucketName
return
return hip, err
}
// empty prefix is valid.
if !IsValidObjectPrefix(hip.objPrefix) {
err = ErrInvalidObjectName
return
return hip, err
}
if len(qParams[mgmtClientToken]) > 0 {
@ -1275,7 +1275,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if (hip.forceStart && hip.forceStop) ||
(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
err = ErrInvalidRequest
return
return hip, err
}
// ignore body if clientToken is provided
@ -1284,12 +1284,12 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if jerr != nil {
adminLogIf(GlobalContext, jerr, logger.ErrorKind)
err = ErrRequestBodyParse
return
return hip, err
}
}
err = ErrNone
return
return hip, err
}
// HealHandler - POST /minio/admin/v3/heal/
@ -2022,7 +2022,7 @@ func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err err
opts.OS = true
// Older mc - cannot deal with more types...
}
return
return opts, err
}
// TraceHandler - POST /minio/admin/v3/trace

View File

@ -31,7 +31,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
return prefix, marker, delimiter, maxkeys, encodingType, errCode
}
} else {
maxkeys = maxObjectList
@ -41,7 +41,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string,
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
return prefix, marker, delimiter, maxkeys, encodingType, errCode
}
func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
@ -51,7 +51,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
}
} else {
maxkeys = maxObjectList
@ -62,7 +62,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
return
return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode
}
// Parse bucket url queries for ListObjects V2.
@ -73,7 +73,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
if val, ok := values["continuation-token"]; ok {
if len(val[0]) == 0 {
errCode = ErrIncorrectContinuationToken
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
}
@ -81,7 +81,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
} else {
maxkeys = maxObjectList
@ -97,11 +97,11 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit
decodedToken, err := base64.StdEncoding.DecodeString(token)
if err != nil {
errCode = ErrIncorrectContinuationToken
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
token = string(decodedToken)
}
return
return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode
}
// Parse bucket url queries for ?uploads
@ -112,7 +112,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return
return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
}
} else {
maxUploads = maxUploadsList
@ -123,7 +123,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode
}
// Parse object url queries
@ -134,7 +134,7 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("max-parts") != "" {
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return
return uploadID, partNumberMarker, maxParts, encodingType, errCode
}
} else {
maxParts = maxPartsList
@ -143,11 +143,11 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return
return uploadID, partNumberMarker, maxParts, encodingType, errCode
}
}
uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return
return uploadID, partNumberMarker, maxParts, encodingType, errCode
}

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"time"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -99,7 +99,7 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
return alg
}
}
return
return a
}
func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)
@ -59,19 +59,17 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) {
if z.MinioEnv == nil {
z.MinioEnv = make(map[string]string, zb0003)
} else if len(z.MinioEnv) > 0 {
for key := range z.MinioEnv {
delete(z.MinioEnv, key)
}
clear(z.MinioEnv)
}
for zb0003 > 0 {
zb0003--
var za0002 string
var za0003 string
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MinioEnv")
return
}
var za0003 string
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "MinioEnv", za0002)
@ -240,14 +238,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.MinioEnv == nil {
z.MinioEnv = make(map[string]string, zb0003)
} else if len(z.MinioEnv) > 0 {
for key := range z.MinioEnv {
delete(z.MinioEnv, key)
}
clear(z.MinioEnv)
}
for zb0003 > 0 {
var za0002 string
var za0003 string
zb0003--
var za0002 string
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "MinioEnv")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -592,7 +592,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
output[idx] = obj
idx++
}
return
return output
}
// Disable timeouts and cancellation

View File

@ -248,19 +248,19 @@ func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Req
if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 {
proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr)
}
return
return subToken, proxied, success
}
func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) {
if len(globalProxyEndpoints) == 0 {
return
return proxied, success
}
if index < 0 || index >= len(globalProxyEndpoints) {
return
return proxied, success
}
ep := globalProxyEndpoints[index]
if ep.IsLocal {
return
return proxied, success
}
return true, proxyRequest(ctx, w, r, ep, returnErr)
}

View File

@ -161,7 +161,7 @@ func (b BucketMetadata) lastUpdate() (t time.Time) {
t = b.BucketTargetsConfigMetaUpdatedAt
}
return
return t
}
// Versioning returns true if versioning is enabled
@ -542,13 +542,13 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje
func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) {
if GlobalKMS == nil {
output = input
return
return output, metabytes, err
}
metadata := make(map[string]string)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
if err != nil {
return
return output, metabytes, err
}
outbuf := bytes.NewBuffer(nil)
@ -561,7 +561,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
}
metabytes, err = json.Marshal(metadata)
if err != nil {
return
return output, metabytes, err
}
return outbuf.Bytes(), metabytes, nil
}

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -97,7 +97,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
}
return
return quotaCfg, err
}
func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -172,13 +172,13 @@ func (ri ReplicateObjectInfo) TargetReplicationStatus(arn string) (status replic
repStatMatches := replStatusRegex.FindAllStringSubmatch(ri.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
return
return status
}
if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2])
}
}
return
return status
}
// TargetReplicationStatus - returns replication status of a target
@ -186,13 +186,13 @@ func (o ObjectInfo) TargetReplicationStatus(arn string) (status replication.Stat
repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1)
for _, repStatMatch := range repStatMatches {
if len(repStatMatch) != 3 {
return
return status
}
if repStatMatch[1] == arn {
return replication.StatusType(repStatMatch[2])
}
}
return
return status
}
type replicateTargetDecision struct {
@ -310,7 +310,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
targetsMap: make(map[string]replicateTargetDecision),
}
if len(s) == 0 {
return
return r, err
}
for p := range strings.SplitSeq(s, ",") {
if p == "" {
@ -327,7 +327,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
}
r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]}
}
return
return r, err
}
// ReplicationState represents internal replication state
@ -374,7 +374,7 @@ func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusT
case !rs.ReplicaStatus.Empty():
return rs.ReplicaStatus
default:
return
return st
}
}
@ -737,7 +737,7 @@ type BucketReplicationResyncStatus struct {
func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
m = make(map[string]TargetReplicationResyncStatus)
maps.Copy(m, rs.TargetsMap)
return
return m
}
func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus {
@ -774,7 +774,7 @@ func extractReplicateDiffOpts(q url.Values) (opts madmin.ReplDiffOpts) {
opts.Verbose = q.Get("verbose") == "true"
opts.ARN = q.Get("arn")
opts.Prefix = q.Get("prefix")
return
return opts
}
const (

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/minio/minio/internal/bucket/replication"
"github.com/tinylib/msgp/msgp"
@ -41,19 +41,17 @@ func (z *BucketReplicationResyncStatus) DecodeMsg(dc *msgp.Reader) (err error) {
if z.TargetsMap == nil {
z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002)
} else if len(z.TargetsMap) > 0 {
for key := range z.TargetsMap {
delete(z.TargetsMap, key)
}
clear(z.TargetsMap)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 TargetReplicationResyncStatus
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "TargetsMap")
return
}
var za0002 TargetReplicationResyncStatus
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "TargetsMap", za0001)
@ -203,14 +201,12 @@ func (z *BucketReplicationResyncStatus) UnmarshalMsg(bts []byte) (o []byte, err
if z.TargetsMap == nil {
z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002)
} else if len(z.TargetsMap) > 0 {
for key := range z.TargetsMap {
delete(z.TargetsMap, key)
}
clear(z.TargetsMap)
}
for zb0002 > 0 {
var za0001 string
var za0002 TargetReplicationResyncStatus
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TargetsMap")
@ -288,19 +284,17 @@ func (z *MRFReplicateEntries) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Entries == nil {
z.Entries = make(map[string]MRFReplicateEntry, zb0002)
} else if len(z.Entries) > 0 {
for key := range z.Entries {
delete(z.Entries, key)
}
clear(z.Entries)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 MRFReplicateEntry
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
var za0002 MRFReplicateEntry
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
@ -478,14 +472,12 @@ func (z *MRFReplicateEntries) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Entries == nil {
z.Entries = make(map[string]MRFReplicateEntry, zb0002)
} else if len(z.Entries) > 0 {
for key := range z.Entries {
delete(z.Entries, key)
}
clear(z.Entries)
}
for zb0002 > 0 {
var za0001 string
var za0002 MRFReplicateEntry
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Entries")
@ -872,19 +864,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Targets == nil {
z.Targets = make(map[string]replication.StatusType, zb0002)
} else if len(z.Targets) > 0 {
for key := range z.Targets {
delete(z.Targets, key)
}
clear(z.Targets)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 replication.StatusType
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Targets")
return
}
var za0002 replication.StatusType
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Targets", za0001)
@ -902,19 +892,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.PurgeTargets == nil {
z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.PurgeTargets) > 0 {
for key := range z.PurgeTargets {
delete(z.PurgeTargets, key)
}
clear(z.PurgeTargets)
}
for zb0003 > 0 {
zb0003--
var za0003 string
var za0004 VersionPurgeStatusType
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "PurgeTargets")
return
}
var za0004 VersionPurgeStatusType
err = za0004.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets", za0003)
@ -932,19 +920,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) {
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
clear(z.ResetStatusesMap)
}
for zb0004 > 0 {
zb0004--
var za0005 string
var za0006 string
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")
return
}
var za0006 string
za0006, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap", za0005)
@ -1236,14 +1222,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Targets == nil {
z.Targets = make(map[string]replication.StatusType, zb0002)
} else if len(z.Targets) > 0 {
for key := range z.Targets {
delete(z.Targets, key)
}
clear(z.Targets)
}
for zb0002 > 0 {
var za0001 string
var za0002 replication.StatusType
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Targets")
@ -1266,14 +1250,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.PurgeTargets == nil {
z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.PurgeTargets) > 0 {
for key := range z.PurgeTargets {
delete(z.PurgeTargets, key)
}
clear(z.PurgeTargets)
}
for zb0003 > 0 {
var za0003 string
var za0004 VersionPurgeStatusType
zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "PurgeTargets")
@ -1296,14 +1278,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.ResetStatusesMap == nil {
z.ResetStatusesMap = make(map[string]string, zb0004)
} else if len(z.ResetStatusesMap) > 0 {
for key := range z.ResetStatusesMap {
delete(z.ResetStatusesMap, key)
}
clear(z.ResetStatusesMap)
}
for zb0004 > 0 {
var za0005 string
var za0006 string
zb0004--
var za0005 string
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ResetStatusesMap")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -253,31 +253,31 @@ func getMustReplicateOptions(userDefined map[string]string, userTags string, sta
func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) {
// object layer not initialized we return with no decision.
if newObjectLayerFn() == nil {
return
return dsc
}
// Disable server-side replication on object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if !globalBucketVersioningSys.PrefixEnabled(bucket, object) {
return
return dsc
}
replStatus := mopts.ReplicationStatus()
if replStatus == replication.Replica && !mopts.isMetadataReplication() {
return
return dsc
}
if mopts.replicationRequest { // incoming replication request on target cluster
return
return dsc
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogOnceIf(ctx, err, bucket)
return
return dsc
}
if cfg == nil {
return
return dsc
}
opts := replication.ObjectOpts{
@ -348,16 +348,16 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
replLogOnceIf(ctx, err, bucket)
return
return dsc
}
// If incoming request is a replication request, it does not need to be re-replicated.
if delOpts.ReplicationRequest {
return
return dsc
}
// Skip replication if this object's prefix is excluded from being
// versioned.
if !delOpts.Versioned {
return
return dsc
}
opts := replication.ObjectOpts{
Name: dobj.ObjectName,
@ -617,10 +617,10 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if dobj.VersionID == "" && rinfo.PrevReplicationStatus == replication.Completed && dobj.OpType != replication.ExistingObjectReplicationType {
rinfo.ReplicationStatus = rinfo.PrevReplicationStatus
return
return rinfo
}
if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete {
return
return rinfo
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
@ -641,7 +641,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
} else {
rinfo.VersionPurgeStatus = replication.VersionPurgeFailed
}
return
return rinfo
}
// early return if already replicated delete marker for existing object replication/ healing delete markers
if dobj.DeleteMarkerVersionID != "" {
@ -658,13 +658,13 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
// delete marker already replicated
if dobj.VersionID == "" && rinfo.VersionPurgeStatus.Empty() {
rinfo.ReplicationStatus = replication.Completed
return
return rinfo
}
case isErrObjectNotFound(serr), isErrVersionNotFound(serr):
// version being purged is already not found on target.
if !rinfo.VersionPurgeStatus.Empty() {
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
return
return rinfo
}
case isErrReadQuorum(serr), isErrWriteQuorum(serr):
// destination has some quorum issues, perform removeObject() anyways
@ -678,7 +678,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
if err != nil && !toi.ReplicationReady {
rinfo.ReplicationStatus = replication.Failed
rinfo.Err = err
return
return rinfo
}
}
}
@ -709,7 +709,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
rinfo.VersionPurgeStatus = replication.VersionPurgeComplete
}
}
return
return rinfo
}
func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string {
@ -910,7 +910,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
}
putOpts.ServerSideEncryption = sseEnc
}
return
return putOpts, isMP, err
}
type replicationAction string
@ -1208,7 +1208,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
if ri.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true
return
return rinfo
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
@ -1220,7 +1220,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
@ -1244,7 +1244,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
})
replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
}
return
return rinfo
}
defer gr.Close()
@ -1268,7 +1268,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
}
@ -1307,7 +1307,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
var headerSize int
@ -1344,7 +1344,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
}
return
return rinfo
}
// replicateAll replicates metadata for specified version of the object to destination bucket
@ -1380,7 +1380,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
@ -1405,7 +1405,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
})
replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
}
return
return rinfo
}
defer gr.Close()
@ -1418,7 +1418,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) {
rinfo.ReplicationStatus = replication.Completed
rinfo.ReplicationResynced = true
return
return rinfo
}
size, err := objInfo.GetActualSize()
@ -1431,7 +1431,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
// Set the encrypted size for SSE-C objects
@ -1494,7 +1494,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
rinfo.ReplicationAction = rAction
rinfo.ReplicationStatus = replication.Completed
}
return
return rinfo
}
} else {
// SSEC objects will refuse HeadObject without the decryption key.
@ -1528,7 +1528,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
}
applyAction:
@ -1594,7 +1594,7 @@ applyAction:
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
return
return rinfo
}
var headerSize int
for k, v := range putOpts.Header() {
@ -1631,7 +1631,7 @@ applyAction:
}
}
}
return
return rinfo
}
func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) {
@ -2677,7 +2677,7 @@ func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool {
// Resync returns true if replication reset is requested
func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) {
if c.Empty() {
return
return r
}
// Now overlay existing object replication choices for target
@ -2693,7 +2693,7 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc Replic
tgtArns := c.Config.FilterTargetArns(opts)
// indicates no matching target with Existing object replication enabled.
if len(tgtArns) == 0 {
return
return r
}
for _, t := range tgtArns {
opts.TargetArn = t
@ -2719,7 +2719,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu
targets: make(map[string]ResyncTargetDecision, len(dsc.targetsMap)),
}
if c.remotes == nil {
return
return r
}
for _, tgt := range c.remotes.Targets {
d, ok := dsc.targetsMap[tgt.Arn]
@ -2731,7 +2731,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu
}
r.targets[d.Arn] = resyncTarget(oi, tgt.Arn, tgt.ResetID, tgt.ResetBeforeDate, tgtStatuses[tgt.Arn])
}
return
return r
}
func targetResetHeader(arn string) string {
@ -2750,28 +2750,28 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
if !ok { // existing object replication is enabled and object version is unreplicated so far.
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested
rd.Replicate = true
return
return rd
}
// For existing object reset - this condition is needed
rd.Replicate = tgtStatus == ""
return
return rd
}
if resetID == "" || resetBeforeDate.Equal(timeSentinel) { // no reset in progress
return
return rd
}
// if already replicated, return true if a new reset was requested.
splits := strings.SplitN(rs, ";", 2)
if len(splits) != 2 {
return
return rd
}
newReset := splits[1] != resetID
if !newReset && tgtStatus == replication.Completed {
// already replicated and no reset requested
return
return rd
}
rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate)
return
return rd
}
const resyncTimeInterval = time.Minute * 1
@ -3422,12 +3422,12 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
roi = getHealReplicateObjectInfo(oi, rcfg)
roi.RetryCount = uint32(retryCount)
if !roi.Dsc.ReplicateAny() {
return
return roi
}
// early return if replication already done, otherwise we need to determine if this
// version is an existing object that needs healing.
if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() {
return
return roi
}
if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() {
@ -3457,14 +3457,14 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
roi.ReplicationStatus == replication.Failed ||
roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending {
globalReplicationPool.Get().queueReplicaDeleteTask(dv)
return
return roi
}
// if replication status is Complete on DeleteMarker and existing object resync required
if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) {
queueReplicateDeletesWrapper(dv, roi.ExistingObjResync)
return
return roi
}
return
return roi
}
if roi.ExistingObjResync.mustResync() {
roi.OpType = replication.ExistingObjectReplicationType
@ -3473,13 +3473,13 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf
case replication.Pending, replication.Failed:
roi.EventType = ReplicateHeal
globalReplicationPool.Get().queueReplicaTask(roi)
return
return roi
}
if roi.ExistingObjResync.mustResync() {
roi.EventType = ReplicateExisting
globalReplicationPool.Get().queueReplicaTask(roi)
}
return
return roi
}
const (

View File

@ -38,7 +38,7 @@ type ReplicationLatency struct {
// Merge two replication latency into a new one
func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) {
newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram)
return
return newReplLatency
}
// Get upload latency of each object size range
@ -49,7 +49,7 @@ func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) {
// Convert nanoseconds to milliseconds
ret[sizeTagToString(k)] = uint64(v.avg() / time.Millisecond)
}
return
return ret
}
// Update replication upload latency with a new value
@ -64,7 +64,7 @@ type ReplicationLastMinute struct {
func (rl ReplicationLastMinute) merge(other ReplicationLastMinute) (nl ReplicationLastMinute) {
nl = ReplicationLastMinute{rl.LastMinute.merge(other.LastMinute)}
return
return nl
}
func (rl *ReplicationLastMinute) addsize(n int64) {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)
@ -617,19 +617,17 @@ func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Stats == nil {
z.Stats = make(map[string]*BucketReplicationStat, zb0002)
} else if len(z.Stats) > 0 {
for key := range z.Stats {
delete(z.Stats, key)
}
clear(z.Stats)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 *BucketReplicationStat
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Stats")
return
}
var za0002 *BucketReplicationStat
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
@ -943,14 +941,12 @@ func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error)
if z.Stats == nil {
z.Stats = make(map[string]*BucketReplicationStat, zb0002)
} else if len(z.Stats) > 0 {
for key := range z.Stats {
delete(z.Stats, key)
}
clear(z.Stats)
}
for zb0002 > 0 {
var za0001 string
var za0002 *BucketReplicationStat
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Stats")
@ -1402,19 +1398,17 @@ func (z *BucketStatsMap) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Stats == nil {
z.Stats = make(map[string]BucketStats, zb0002)
} else if len(z.Stats) > 0 {
for key := range z.Stats {
delete(z.Stats, key)
}
clear(z.Stats)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 BucketStats
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Stats")
return
}
var za0002 BucketStats
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Stats", za0001)
@ -1526,14 +1520,12 @@ func (z *BucketStatsMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Stats == nil {
z.Stats = make(map[string]BucketStats, zb0002)
} else if len(z.Stats) > 0 {
for key := range z.Stats {
delete(z.Stats, key)
}
clear(z.Stats)
}
for zb0002 > 0 {
var za0001 string
var za0002 BucketStats
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Stats")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -285,7 +285,7 @@ func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType str
}
}
}
return
return targets
}
// ListBucketTargets - gets list of bucket targets for this bucket.
@ -668,7 +668,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
// getRemoteARN gets existing ARN for an endpoint or generates a new one.
func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget, deplID string) (arn string, exists bool) {
if target == nil {
return
return arn, exists
}
sys.RLock()
defer sys.RUnlock()
@ -682,7 +682,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar
}
}
if !target.Type.IsValid() {
return
return arn, exists
}
return generateARN(target, deplID), false
}

View File

@ -167,7 +167,7 @@ func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
})
sys.RUnlock()
return
return logs
}
// Cancel - cancels the target

View File

@ -1221,11 +1221,11 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
return err
}
if zb0002 == 0 {
*z = nil
return
return err
}
*z = make(dataUsageHashMap, zb0002)
for i := uint32(0); i < zb0002; i++ {
@ -1234,12 +1234,12 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) {
zb0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
return err
}
(*z)[zb0003] = struct{}{}
}
}
return
return err
}
// EncodeMsg implements msgp.Encodable
@ -1247,16 +1247,16 @@ func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteArrayHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
return err
}
for zb0004 := range z {
err = en.WriteString(zb0004)
if err != nil {
err = msgp.WrapError(err, zb0004)
return
return err
}
}
return
return err
}
// MarshalMsg implements msgp.Marshaler
@ -1266,7 +1266,7 @@ func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) {
for zb0004 := range z {
o = msgp.AppendString(o, zb0004)
}
return
return o, err
}
// UnmarshalMsg implements msgp.Unmarshaler
@ -1275,7 +1275,7 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
return o, err
}
if zb0002 == 0 {
*z = nil
@ -1288,13 +1288,13 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
zb0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
return o, err
}
(*z)[zb0003] = struct{}{}
}
}
o = bts
return
return o, err
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
@ -1303,7 +1303,7 @@ func (z dataUsageHashMap) Msgsize() (s int) {
for zb0004 := range z {
s += msgp.StringPrefixSize + len(zb0004)
}
return
return s
}
//msgp:encode ignore currentScannerCycle

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"time"
@ -36,19 +36,17 @@ func (z *allTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Tiers == nil {
z.Tiers = make(map[string]tierStats, zb0002)
} else if len(z.Tiers) > 0 {
for key := range z.Tiers {
delete(z.Tiers, key)
}
clear(z.Tiers)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 tierStats
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Tiers")
return
}
var za0002 tierStats
var zb0003 uint32
zb0003, err = dc.ReadMapHeader()
if err != nil {
@ -207,14 +205,12 @@ func (z *allTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Tiers == nil {
z.Tiers = make(map[string]tierStats, zb0002)
} else if len(z.Tiers) > 0 {
for key := range z.Tiers {
delete(z.Tiers, key)
}
clear(z.Tiers)
}
for zb0002 > 0 {
var za0001 string
var za0002 tierStats
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Tiers")
@ -415,19 +411,17 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntry, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntry
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntry
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -543,14 +537,12 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntry, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntry
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -799,19 +791,17 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV2
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV2
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -864,14 +854,12 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV2, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV2
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -942,19 +930,17 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV3
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV3
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -1007,14 +993,12 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV3, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV3
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -1085,19 +1069,17 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV4
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV4
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -1150,14 +1132,12 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV4, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV4
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -1228,19 +1208,17 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV5
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV5
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -1293,14 +1271,12 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV5, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV5
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -1371,19 +1347,17 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV6
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV6
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -1436,14 +1410,12 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV6, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV6
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -1514,19 +1486,17 @@ func (z *dataUsageCacheV7) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV7, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 dataUsageEntryV7
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Cache")
return
}
var za0002 dataUsageEntryV7
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Cache", za0001)
@ -1579,14 +1549,12 @@ func (z *dataUsageCacheV7) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Cache == nil {
z.Cache = make(map[string]dataUsageEntryV7, zb0002)
} else if len(z.Cache) > 0 {
for key := range z.Cache {
delete(z.Cache, key)
}
clear(z.Cache)
}
for zb0002 > 0 {
var za0001 string
var za0002 dataUsageEntryV7
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cache")
@ -1745,19 +1713,17 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
clear(z.AllTierStats.Tiers)
}
for zb0005 > 0 {
zb0005--
var za0003 string
var za0004 tierStats
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")
return
}
var za0004 tierStats
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
@ -2211,14 +2177,12 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
clear(z.AllTierStats.Tiers)
}
for zb0005 > 0 {
var za0003 string
var za0004 tierStats
zb0005--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")
@ -2984,19 +2948,17 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
clear(z.AllTierStats.Tiers)
}
for zb0005 > 0 {
zb0005--
var za0003 string
var za0004 tierStats
za0003, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")
return
}
var za0004 tierStats
var zb0006 uint32
zb0006, err = dc.ReadMapHeader()
if err != nil {
@ -3192,14 +3154,12 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.AllTierStats.Tiers == nil {
z.AllTierStats.Tiers = make(map[string]tierStats, zb0005)
} else if len(z.AllTierStats.Tiers) > 0 {
for key := range z.AllTierStats.Tiers {
delete(z.AllTierStats.Tiers, key)
}
clear(z.AllTierStats.Tiers)
}
for zb0005 > 0 {
var za0003 string
var za0004 tierStats
zb0005--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AllTierStats", "Tiers")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -56,13 +56,13 @@ func TestDataUsageUpdate(t *testing.T) {
var s os.FileInfo
s, err = os.Stat(item.Path)
if err != nil {
return
return sizeS, err
}
sizeS.totalSize = s.Size()
sizeS.versions++
return sizeS, nil
}
return
return sizeS, err
}
xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()}
xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) {
@ -279,13 +279,13 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
var s os.FileInfo
s, err = os.Stat(item.Path)
if err != nil {
return
return sizeS, err
}
sizeS.totalSize = s.Size()
sizeS.versions++
return
return sizeS, err
}
return
return sizeS, err
}
weSleep := func() bool { return false }
@ -569,13 +569,13 @@ func TestDataUsageCacheSerialize(t *testing.T) {
var s os.FileInfo
s, err = os.Stat(item.Path)
if err != nil {
return
return sizeS, err
}
sizeS.versions++
sizeS.totalSize = s.Size()
return
return sizeS, err
}
return
return sizeS, err
}
xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()}
xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) {

View File

@ -87,7 +87,7 @@ func (d *DummyDataGen) Read(b []byte) (n int, err error) {
}
err = io.EOF
}
return
return n, err
}
func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {

View File

@ -450,7 +450,7 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
}
}
_, err = newEncryptMetadata(r.Context(), kind, keyID, key, bucket, object, metadata, kmsCtx)
return
return err
}
// EncryptRequest takes the client provided content and encrypts the data
@ -855,7 +855,7 @@ func tryDecryptETag(key []byte, encryptedETag string, sses3 bool) string {
func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) {
if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
err = errors.New("Object is not encrypted")
return
return encOff, encLength, skipLen, seqNumber, partStart, err
}
if rs == nil {
@ -873,7 +873,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
partSize, err = sio.DecryptedSize(uint64(part.Size))
if err != nil {
err = errObjectTampered
return
return encOff, encLength, skipLen, seqNumber, partStart, err
}
sizes[i] = int64(partSize)
decObjSize += int64(partSize)
@ -883,7 +883,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
partSize, err = sio.DecryptedSize(uint64(o.Size))
if err != nil {
err = errObjectTampered
return
return encOff, encLength, skipLen, seqNumber, partStart, err
}
sizes = []int64{int64(partSize)}
decObjSize = sizes[0]
@ -892,7 +892,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
var off, length int64
off, length, err = rs.GetOffsetLength(decObjSize)
if err != nil {
return
return encOff, encLength, skipLen, seqNumber, partStart, err
}
// At this point, we have:

View File

@ -483,7 +483,7 @@ func TestGetDecryptedRange(t *testing.T) {
cumulativeSum += v
cumulativeEncSum += getEncSize(v)
}
return
return o, l, skip, sn, ps
}
for i, test := range testMPs {

View File

@ -443,7 +443,7 @@ func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err err
layout: setArgs,
})
}
return
return layout, err
}
// mergeDisksLayoutFromArgs supports with and without ellipses transparently.
@ -475,7 +475,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
legacy: true,
pools: []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}},
}
return
return err
}
for _, arg := range args {
@ -489,7 +489,7 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) {
}
ctxt.Layout.pools = append(ctxt.Layout.pools, poolDisksLayout{cmdline: arg, layout: setArgs})
}
return
return err
}
// CreateServerEndpoints - validates and creates new endpoints from input args, supports

View File

@ -267,7 +267,7 @@ func (l EndpointServerPools) ESCount() (count int) {
for _, p := range l {
count += p.SetCount
}
return
return count
}
// GetNodes returns a sorted list of nodes in this cluster
@ -297,7 +297,7 @@ func (l EndpointServerPools) GetNodes() (nodes []Node) {
sort.Slice(nodes, func(i, j int) bool {
return nodes[i].Host < nodes[j].Host
})
return
return nodes
}
// GetPoolIdx return pool index
@ -588,7 +588,7 @@ func (endpoints Endpoints) GetAllStrings() (all []string) {
for _, e := range endpoints {
all = append(all, e.String())
}
return
return all
}
func hostResolveToLocalhost(endpoint Endpoint) bool {

View File

@ -69,7 +69,7 @@ func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int
})
return enc
}
return
return e, err
}
// EncodeData encodes the given data and returns the erasure-coded data.

View File

@ -283,7 +283,7 @@ func countPartNotSuccess(partErrs []int) (c int) {
c++
}
}
return
return c
}
// checkObjectWithAllParts sets partsMetadata and onlineDisks when xl.meta is inexistant/corrupted or outdated
@ -436,5 +436,5 @@ func checkObjectWithAllParts(ctx context.Context, onlineDisks []StorageAPI, part
dataErrsByDisk[disk][part] = dataErrsByPart[part][disk]
}
}
return
return dataErrsByDisk, dataErrsByPart
}

View File

@ -965,7 +965,7 @@ func danglingMetaErrsCount(cerrs []error) (notFoundCount int, nonActionableCount
nonActionableCount++
}
}
return
return notFoundCount, nonActionableCount
}
func danglingPartErrsCount(results []int) (notFoundCount int, nonActionableCount int) {
@ -980,7 +980,7 @@ func danglingPartErrsCount(results []int) (notFoundCount int, nonActionableCount
nonActionableCount++
}
}
return
return notFoundCount, nonActionableCount
}
// Object is considered dangling/corrupted if and only

View File

@ -521,7 +521,7 @@ func listObjectParities(partsMetadata []FileInfo, errs []error) (parities []int)
parities[index] = metadata.Erasure.ParityBlocks
}
}
return
return parities
}
// Returns per object readQuorum and writeQuorum

View File

@ -233,7 +233,7 @@ func (p poolMeta) ResumeBucketObject(idx int) (bucket, object string) {
bucket = p.Pools[idx].Decommission.Bucket
object = p.Pools[idx].Decommission.Object
}
return
return bucket, object
}
func (p *poolMeta) TrackCurrentBucketObject(idx int, bucket string, object string) {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -446,7 +446,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
select {
case <-ctx.Done():
doneCh <- ctx.Err()
return
return err
default:
}
@ -464,7 +464,7 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
}
rebalanceLogIf(GlobalContext, err)
doneCh <- err
return
return err
}
stopFn(0, nil)
z.bucketRebalanceDone(bucket, poolIdx)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -702,7 +702,7 @@ func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
b.StandardSCParity = scParity
b.RRSCParity = rrSCParity
return
return b
}
func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo {

View File

@ -116,7 +116,7 @@ func diskErrToDriveState(err error) (state string) {
state = fmt.Sprintf("%s (cause: %s)", madmin.DriveStateUnknown, err)
}
return
return state
}
func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) {

View File

@ -94,18 +94,18 @@ func availableMemory() (available uint64) {
if limit > 0 {
// A valid value is found, return its 90%
available = (limit * 9) / 10
return
return available
}
} // for all other platforms limits are based on virtual memory.
memStats, err := mem.VirtualMemory()
if err != nil {
return
return available
}
// A valid value is available return its 90%
available = (memStats.Available * 9) / 10
return
return available
}
func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) {

View File

@ -466,7 +466,7 @@ func getHostName(r *http.Request) (hostName string) {
} else {
hostName = r.Host
}
return
return hostName
}
// Proxy any request to an endpoint.
@ -500,5 +500,5 @@ func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, e
r.URL.Host = ep.Host
f.ServeHTTP(w, r)
return
return success
}

View File

@ -18,7 +18,11 @@
package cmd
import (
"sync"
"testing"
"time"
xhttp "github.com/minio/minio/internal/http"
)
// Test redactLDAPPwd()
@ -52,3 +56,129 @@ func TestRedactLDAPPwd(t *testing.T) {
}
}
}
// TestRaulStatsRaceCondition tests the race-condition fix for HTTPStats.
// It specifically exercises the race between:
//   - Write operations via updateStats.
//   - Read operations via toServerHTTPStats(false).
//
// The test only provides value when run with the -race detector enabled.
func TestRaulStatsRaceCondition(t *testing.T) {
	httpStats := newHTTPStats()

	// Simulate the concurrent scenario from the original race condition:
	// multiple HTTP request handlers updating stats concurrently,
	// while background processes are reading the stats for persistence.
	const numWriters = 100 // Simulate many HTTP request handlers.
	const numReaders = 50  // Simulate background stats readers.
	const opsPerGoroutine = 100

	var wg sync.WaitGroup

	for i := range numWriters {
		wg.Add(1)
		go func(writerID int) {
			defer wg.Done()
			// Rotate across the different mutation entry points so all
			// write paths race against the readers below.
			for j := range opsPerGoroutine {
				switch j % 4 {
				case 0:
					httpStats.updateStats("GetObject", &xhttp.ResponseRecorder{})
				case 1:
					httpStats.totalS3Requests.Inc("PutObject")
				case 2:
					httpStats.totalS3Errors.Inc("DeleteObject")
				case 3:
					httpStats.currentS3Requests.Inc("ListObjects")
				}
			}
		}(i)
	}

	for i := range numReaders {
		wg.Add(1)
		go func(readerID int) {
			defer wg.Done()
			for range opsPerGoroutine {
				_ = httpStats.toServerHTTPStats(false)
				_ = httpStats.totalS3Requests.Load(false)
				_ = httpStats.currentS3Requests.Load(false)
				// Small pause to interleave reads with in-flight writes.
				time.Sleep(1 * time.Microsecond)
			}
		}(i)
	}

	wg.Wait()

	// Sanity check: at least some of the concurrent writes must have landed.
	finalStats := httpStats.toServerHTTPStats(false)
	totalRequests := 0
	for _, v := range finalStats.TotalS3Requests.APIStats {
		totalRequests += v
	}
	if totalRequests == 0 {
		t.Error("Expected some total requests to be recorded, but got zero")
	}
	t.Logf("Total requests recorded: %d", totalRequests)
	t.Logf("Race condition test passed - no races detected")
}
// TestRaulHTTPAPIStatsRaceCondition tests concurrent access to HTTPAPIStats
// specifically: many goroutines incrementing one key while others snapshot
// the map via Load. Lost increments indicate a data race in Inc/Load.
func TestRaulHTTPAPIStatsRaceCondition(t *testing.T) {
	stats := &HTTPAPIStats{}

	const numGoroutines = 50
	const opsPerGoroutine = 1000

	var wg sync.WaitGroup

	// Writers: concurrent increments of the same key.
	for i := range numGoroutines {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for range opsPerGoroutine {
				stats.Inc("TestAPI")
			}
		}(i)
	}

	// Readers: concurrent snapshots while the writers are still running.
	for i := range numGoroutines / 2 {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for range opsPerGoroutine / 2 {
				_ = stats.Load(false)
			}
		}(i)
	}

	wg.Wait()

	// Every increment must be accounted for; a shortfall means increments
	// were lost to a write/write or read/write race.
	finalStats := stats.Load(false)
	expected := numGoroutines * opsPerGoroutine
	actual := finalStats["TestAPI"]
	if actual != expected {
		t.Errorf("Race condition detected: expected %d, got %d (lost %d increments)",
			expected, actual, expected-actual)
	}
}
// TestRaulBucketHTTPStatsRaceCondition tests concurrent access to bucket-level HTTP stats.
func TestRaulBucketHTTPStatsRaceCondition(t *testing.T) {
	bucketStats := newBucketHTTPStats()
	const numGoroutines = 50
	const opsPerGoroutine = 100
	var wg sync.WaitGroup
	for i := range numGoroutines {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			bucketName := "test-bucket"
			for range opsPerGoroutine {
				// Exercise both write paths (nil and non-nil recorder —
				// presumably the nil case covers the early-request path;
				// confirm against updateHTTPStats) racing with load().
				bucketStats.updateHTTPStats(bucketName, "GetObject", nil)
				recorder := &xhttp.ResponseRecorder{}
				bucketStats.updateHTTPStats(bucketName, "GetObject", recorder)
				_ = bucketStats.load(bucketName)
			}
		}(i)
	}
	wg.Wait()
	// Sanity check: the bucket entry must exist after concurrent updates.
	stats := bucketStats.load("test-bucket")
	if stats.totalS3Requests == nil {
		t.Error("Expected bucket stats to be initialized")
	}
	t.Logf("Bucket HTTP stats race test passed")
}

View File

@ -1128,7 +1128,7 @@ func (store *IAMStoreSys) listGroups(ctx context.Context) (res []string, err err
return true
})
}
return
return res, err
}
// PolicyDBUpdate - adds or removes given policies to/from the user or group's
@ -1139,7 +1139,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
) {
if name == "" {
err = errInvalidArgument
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
cache := store.lock()
@ -1163,12 +1163,12 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
g, ok := cache.iamGroupsMap[name]
if !ok {
err = errNoSuchGroup
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
if g.Status == statusDisabled {
err = errGroupDisabled
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
}
mp, _ = cache.iamGroupPolicyMap.Load(name)
@ -1186,7 +1186,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
for _, p := range policiesToUpdate.ToSlice() {
if _, found := cache.iamPolicyDocsMap[p]; !found {
err = errNoSuchPolicy
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
}
newPolicySet = existingPolicySet.Union(policiesToUpdate)
@ -1198,7 +1198,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
// We return an error if the requested policy update will have no effect.
if policiesToUpdate.IsEmpty() {
err = errNoPolicyToAttachOrDetach
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
newPolicies := newPolicySet.ToSlice()
@ -1210,7 +1210,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
// in this case, we delete the mapping from the store.
if len(newPolicies) == 0 {
if err = store.deleteMappedPolicy(ctx, name, userType, isGroup); err != nil && !errors.Is(err, errNoSuchPolicy) {
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
if !isGroup {
if userType == stsUser {
@ -1223,7 +1223,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro
}
} else {
if err = store.saveMappedPolicy(ctx, name, userType, isGroup, newPolicyMapping); err != nil {
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
if !isGroup {
if userType == stsUser {
@ -3052,7 +3052,7 @@ func extractJWTClaims(u UserIdentity) (jwtClaims *jwt.MapClaims, err error) {
break
}
}
return
return jwtClaims, err
}
func validateSvcExpirationInUTC(expirationInUTC time.Time) error {

View File

@ -1029,7 +1029,7 @@ func (sys *IAMSys) SetUserStatus(ctx context.Context, accessKey string, status m
updatedAt, err = sys.store.SetUserStatus(ctx, accessKey, status)
if err != nil {
return
return updatedAt, err
}
sys.notifyForUser(ctx, accessKey, false)
@ -1985,7 +1985,7 @@ func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userTyp
updatedAt, err = sys.store.PolicyDBSet(ctx, name, policy, userType, isGroup)
if err != nil {
return
return updatedAt, err
}
// Notify all other MinIO peers to reload policy
@ -2008,7 +2008,7 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) {
if !sys.Initialized() {
err = errServerNotInitialized
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
userOrGroup := r.User
@ -2021,24 +2021,24 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
if isGroup {
_, err = sys.GetGroupDescription(userOrGroup)
if err != nil {
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
} else {
var isTemp bool
isTemp, _, err = sys.IsTempUser(userOrGroup)
if err != nil && err != errNoSuchUser {
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
if isTemp {
err = errIAMActionNotAllowed
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
// When the user is root credential you are not allowed to
// add policies for root user.
if userOrGroup == globalActiveCred.AccessKey {
err = errIAMActionNotAllowed
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
// Validate that user exists.
@ -2046,14 +2046,14 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
_, userExists = sys.GetUser(ctx, userOrGroup)
if !userExists {
err = errNoSuchUser
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
}
updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, userOrGroup, isGroup,
regUser, r.Policies, isAttach)
if err != nil {
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
// Notify all other MinIO peers to reload policy
@ -2077,7 +2077,7 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool,
UpdatedAt: updatedAt,
}))
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
// PolicyDBUpdateLDAP - adds or removes policies from a user or a group verified
@ -2087,7 +2087,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) {
if !sys.Initialized() {
err = errServerNotInitialized
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
var dn string
@ -2097,7 +2097,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
dnResult, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User)
if err != nil {
iamLogIf(ctx, err)
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
if dnResult == nil {
// dn not found - still attempt to detach if provided user is a DN.
@ -2105,7 +2105,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
dn = sys.LDAPConfig.QuickNormalizeDN(r.User)
} else {
err = errNoSuchUser
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
} else {
dn = dnResult.NormDN
@ -2115,14 +2115,14 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
var underBaseDN bool
if dnResult, underBaseDN, err = sys.LDAPConfig.GetValidatedGroupDN(nil, r.Group); err != nil {
iamLogIf(ctx, err)
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
if dnResult == nil || !underBaseDN {
if !isAttach {
dn = sys.LDAPConfig.QuickNormalizeDN(r.Group)
} else {
err = errNoSuchGroup
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
} else {
// We use the group DN returned by the LDAP server (this may not
@ -2149,7 +2149,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(
ctx, dn, isGroup, userType, r.Policies, isAttach)
if err != nil {
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
// Notify all other MinIO peers to reload policy
@ -2173,7 +2173,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
UpdatedAt: updatedAt,
}))
return
return updatedAt, addedOrRemoved, effectivePolicies, err
}
// PolicyDBGet - gets policy set on a user or group. If a list of groups is
@ -2376,7 +2376,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
// Now check if we have a sessionPolicy.
spolicy, ok := args.Claims[sessionPolicyNameExtracted]
if !ok {
return
return hasSessionPolicy, isAllowed
}
hasSessionPolicy = true
@ -2385,7 +2385,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
if !ok {
// Sub policy if set, should be a string reject
// malformed/malicious requests.
return
return hasSessionPolicy, isAllowed
}
// Check if policy is parseable.
@ -2393,7 +2393,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
if err != nil {
// Log any error in input session policy config.
iamLogIf(GlobalContext, err)
return
return hasSessionPolicy, isAllowed
}
// SPECIAL CASE: For service accounts, any valid JSON is allowed as a
@ -2417,7 +2417,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli
// 2. do not allow empty statement policies for service accounts.
if subPolicy.Version == "" && subPolicy.Statements == nil && subPolicy.ID == "" {
hasSessionPolicy = false
return
return hasSessionPolicy, isAllowed
}
// As the session policy exists, even if the parent is the root account, it
@ -2437,7 +2437,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
// Now check if we have a sessionPolicy.
spolicy, ok := args.Claims[sessionPolicyNameExtracted]
if !ok {
return
return hasSessionPolicy, isAllowed
}
hasSessionPolicy = true
@ -2446,7 +2446,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
if !ok {
// Sub policy if set, should be a string reject
// malformed/malicious requests.
return
return hasSessionPolicy, isAllowed
}
// Check if policy is parseable.
@ -2454,12 +2454,12 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
if err != nil {
// Log any error in input session policy config.
iamLogIf(GlobalContext, err)
return
return hasSessionPolicy, isAllowed
}
// Policy without Version string value reject it.
if subPolicy.Version == "" {
return
return hasSessionPolicy, isAllowed
}
// As the session policy exists, even if the parent is the root account, it

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -159,5 +159,5 @@ func pickRelevantGoroutines() (gs []string) {
gs = append(gs, g)
}
sort.Strings(gs)
return
return gs
}

View File

@ -162,7 +162,7 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool
reply = l.removeEntry(resource, args, &lri) || reply
}
}
return
return reply, err
}
// removeEntry based on the uid of the lock message, removes a single entry from the

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"time"
@ -19,21 +19,19 @@ func (z *localLockMap) DecodeMsg(dc *msgp.Reader) (err error) {
if (*z) == nil {
(*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
clear((*z))
}
var field []byte
_ = field
for zb0004 > 0 {
zb0004--
var zb0001 string
var zb0002 []lockRequesterInfo
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0002 []lockRequesterInfo
var zb0005 uint32
zb0005, err = dc.ReadArrayHeader()
if err != nil {
@ -115,16 +113,14 @@ func (z *localLockMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
if (*z) == nil {
(*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
clear((*z))
}
var field []byte
_ = field
for zb0004 > 0 {
var zb0001 string
var zb0002 []lockRequesterInfo
zb0004--
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -339,7 +339,7 @@ func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheE
if !o.Versioned && !o.V1 {
fi, err := obj.fileInfo(o.Bucket)
if err != nil {
return
return skip
}
objInfo := fi.ToObjectInfo(o.Bucket, obj.name, versioned)
if o.Lifecycle != nil {
@ -350,7 +350,7 @@ func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheE
fiv, err := obj.fileInfoVersions(o.Bucket)
if err != nil {
return
return skip
}
// Expire all versions if needed, if not attempt to queue for replication.
@ -369,7 +369,7 @@ func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheE
queueReplicationHeal(ctx, o.Bucket, objInfo, o.Replication, 0)
}
return
return skip
}
func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) {

View File

@ -653,7 +653,7 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, com
commonWrite = filter(writes)
commonDelete = filter(deletes)
return
return commonWrite, commonDelete
}
func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) {

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -40,7 +40,7 @@ type collectMetricsOpts struct {
func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
if types == madmin.MetricsNone {
return
return m
}
byHostName := globalMinioAddr
@ -51,7 +51,7 @@ func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m ma
if _, ok := opts.hosts[server.Endpoint]; ok {
byHostName = server.Endpoint
} else {
return
return m
}
}
@ -221,7 +221,7 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM
func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) {
if !globalIsDistErasure {
return
return m
}
all := globalNotificationSys.GetMetrics(ctx, types, opts)
for _, remote := range all {

View File

@ -1704,7 +1704,7 @@ func getMinioProcMetrics() *MetricsGroupV2 {
p, err := procfs.Self()
if err != nil {
internalLogOnceIf(ctx, err, string(nodeMetricNamespace))
return
return metrics
}
openFDs, _ := p.FileDescriptorsLen()
@ -1819,7 +1819,7 @@ func getMinioProcMetrics() *MetricsGroupV2 {
Value: stat.CPUTime(),
})
}
return
return metrics
})
return mg
}
@ -1833,7 +1833,7 @@ func getGoMetrics() *MetricsGroupV2 {
Description: getMinIOGORoutineCountMD(),
Value: float64(runtime.NumGoroutine()),
})
return
return metrics
})
return mg
}
@ -2632,7 +2632,7 @@ func getMinioVersionMetrics() *MetricsGroupV2 {
Description: getMinIOVersionMD(),
VariableLabels: map[string]string{"version": Version},
})
return
return metrics
})
return mg
}
@ -2653,7 +2653,7 @@ func getNodeHealthMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Description: getNodeOfflineTotalMD(),
Value: float64(nodesDown),
})
return
return metrics
})
return mg
}
@ -2666,11 +2666,11 @@ func getMinioHealingMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) {
bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if !exists {
return
return metrics
}
if bgSeq.lastHealActivity.IsZero() {
return
return metrics
}
metrics = make([]MetricV2, 0, 5)
@ -2681,7 +2681,7 @@ func getMinioHealingMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
metrics = append(metrics, getObjectsScanned(bgSeq)...)
metrics = append(metrics, getHealedItems(bgSeq)...)
metrics = append(metrics, getFailedItems(bgSeq)...)
return
return metrics
})
return mg
}
@ -2696,7 +2696,7 @@ func getFailedItems(seq *healSequence) (m []MetricV2) {
Value: float64(v),
})
}
return
return m
}
func getHealedItems(seq *healSequence) (m []MetricV2) {
@ -2709,7 +2709,7 @@ func getHealedItems(seq *healSequence) (m []MetricV2) {
Value: float64(v),
})
}
return
return m
}
func getObjectsScanned(seq *healSequence) (m []MetricV2) {
@ -2722,7 +2722,7 @@ func getObjectsScanned(seq *healSequence) (m []MetricV2) {
Value: float64(v),
})
}
return
return m
}
func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
@ -3030,7 +3030,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
VariableLabels: map[string]string{"api": api},
})
}
return
return metrics
}
// If we have too many, limit them
@ -3099,7 +3099,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
}
}
return
return metrics
})
return mg
}
@ -3142,7 +3142,7 @@ func getNetworkMetrics() *MetricsGroupV2 {
Description: getS3ReceivedBytesMD(),
Value: float64(connStats.s3InputBytes),
})
return
return metrics
})
return mg
}
@ -3155,19 +3155,19 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) {
objLayer := newObjectLayerFn()
if objLayer == nil {
return
return metrics
}
metrics = make([]MetricV2, 0, 50)
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil {
metricsLogIf(ctx, err)
return
return metrics
}
// data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() {
return
return metrics
}
metrics = append(metrics, MetricV2{
@ -3248,7 +3248,7 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Value: float64(clusterBuckets),
})
return
return metrics
})
return mg
}
@ -3265,12 +3265,12 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil {
metricsLogIf(ctx, err)
return
return metrics
}
// data usage has not captured any data yet.
if dataUsageInfo.LastUpdate.IsZero() {
return
return metrics
}
metrics = append(metrics, MetricV2{
@ -3454,7 +3454,7 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
VariableLabels: map[string]string{"bucket": bucket},
})
}
return
return metrics
})
return mg
}
@ -3498,17 +3498,17 @@ func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
objLayer := newObjectLayerFn()
if globalTierConfigMgr.Empty() {
return
return metrics
}
dui, err := loadDataUsageFromBackend(ctx, objLayer)
if err != nil {
metricsLogIf(ctx, err)
return
return metrics
}
// data usage has not captured any tier stats yet.
if dui.TierStats == nil {
return
return metrics
}
return dui.tierMetrics()
@ -3614,7 +3614,7 @@ func getLocalStorageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Value: float64(storageInfo.Backend.RRSCParity),
})
return
return metrics
})
return mg
}
@ -3755,7 +3755,7 @@ func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
})
}
return
return metrics
})
return mg
@ -3776,7 +3776,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
m.Merge(&mRemote)
if m.Aggregated.BatchJobs == nil {
return
return metrics
}
for _, mj := range m.Aggregated.BatchJobs.Jobs {
@ -3822,7 +3822,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
},
)
}
return
return metrics
})
return mg
}
@ -3875,7 +3875,7 @@ func getClusterStorageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
Description: getClusterDrivesTotalMD(),
Value: float64(totalDrives.Sum()),
})
return
return metrics
})
return mg
}
@ -4264,7 +4264,7 @@ func getOrderedLabelValueArrays(labelsWithValue map[string]string) (labels, valu
labels = append(labels, l)
values = append(values, v)
}
return
return labels, values
}
// newMinioCollectorNode describes the collector

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)
@ -297,14 +297,12 @@ func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.StaticLabels == nil {
z.StaticLabels = make(map[string]string, zb0002)
} else if len(z.StaticLabels) > 0 {
for key := range z.StaticLabels {
delete(z.StaticLabels, key)
}
clear(z.StaticLabels)
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels")
@ -333,14 +331,12 @@ func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.VariableLabels == nil {
z.VariableLabels = make(map[string]string, zb0003)
} else if len(z.VariableLabels) > 0 {
for key := range z.VariableLabels {
delete(z.VariableLabels, key)
}
clear(z.VariableLabels)
}
for zb0003 > 0 {
var za0003 string
var za0004 string
zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels")
@ -369,14 +365,12 @@ func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Histogram == nil {
z.Histogram = make(map[string]uint64, zb0004)
} else if len(z.Histogram) > 0 {
for key := range z.Histogram {
delete(z.Histogram, key)
}
clear(z.Histogram)
}
for zb0004 > 0 {
var za0005 string
var za0006 uint64
zb0004--
var za0005 string
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram")

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"testing"

View File

@ -60,7 +60,7 @@ type nodesOnline struct {
func newNodesUpDownCache() *cachevalue.Cache[nodesOnline] {
loadNodesUpDown := func(ctx context.Context) (v nodesOnline, err error) {
v.Online, v.Offline = globalNotificationSys.GetPeerOnlineCount()
return
return v, err
}
return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true},
@ -88,12 +88,12 @@ func newDataUsageInfoCache() *cachevalue.Cache[DataUsageInfo] {
loadDataUsage := func(ctx context.Context) (u DataUsageInfo, err error) {
objLayer := newObjectLayerFn()
if objLayer == nil {
return
return u, err
}
// Collect cluster level object metrics.
u, err = loadDataUsageFromBackend(GlobalContext, objLayer)
return
return u, err
}
return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true},
@ -104,11 +104,11 @@ func newESetHealthResultCache() *cachevalue.Cache[HealthResult] {
loadHealth := func(ctx context.Context) (r HealthResult, err error) {
objLayer := newObjectLayerFn()
if objLayer == nil {
return
return r, err
}
r = objLayer.Health(GlobalContext, HealthOptions{})
return
return r, err
}
return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true},
@ -146,7 +146,7 @@ func getDriveIOStatMetrics(ioStats madmin.DiskIOStats, duration time.Duration) (
// TotalTicks is in milliseconds
m.percUtil = float64(ioStats.TotalTicks) * 100 / (durationSecs * 1000)
return
return m
}
func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
@ -161,7 +161,7 @@ func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
loadDriveMetrics := func(ctx context.Context) (v storageMetrics, err error) {
objLayer := newObjectLayerFn()
if objLayer == nil {
return
return v, err
}
storageInfo := objLayer.LocalStorageInfo(GlobalContext, true)
@ -195,7 +195,7 @@ func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
prevDriveIOStatsRefreshedAt = now
prevDriveIOStatsMu.Unlock()
return
return v, err
}
return cachevalue.NewFromFunc(1*time.Minute,
@ -220,7 +220,7 @@ func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] {
}
}
return
return v, err
}
return cachevalue.NewFromFunc(1*time.Minute,
@ -245,7 +245,7 @@ func newMemoryMetricsCache() *cachevalue.Cache[madmin.MemInfo] {
}
}
return
return v, err
}
return cachevalue.NewFromFunc(1*time.Minute,
@ -268,7 +268,7 @@ func newClusterStorageInfoCache() *cachevalue.Cache[storageMetrics] {
offlineDrives: offlineDrives.Sum(),
totalDrives: totalDrives.Sum(),
}
return
return v, err
}
return cachevalue.NewFromFunc(1*time.Minute,
cachevalue.Opts{ReturnLastGood: true},

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"bytes"
"testing"

View File

@ -126,7 +126,7 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo
n.lockMapMutex.Unlock()
}
return
return locked
}
// Unlock the namespace resource.

View File

@ -88,7 +88,7 @@ func mustGetLocalLoopbacks() (ipList set.StringSet) {
ipList.Add(ip.String())
}
}
return
return ipList
}
// mustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error.
@ -99,7 +99,7 @@ func mustGetLocalIP4() (ipList set.StringSet) {
ipList.Add(ip.String())
}
}
return
return ipList
}
// mustGetLocalIP6 returns IPv6 addresses of localhost. It panics on error.
@ -110,7 +110,7 @@ func mustGetLocalIP6() (ipList set.StringSet) {
ipList.Add(ip.String())
}
}
return
return ipList
}
// getHostIP returns IP address of given host.

View File

@ -26,7 +26,7 @@ func GetTotalCapacity(diskInfo []madmin.Disk) (capacity uint64) {
for _, disk := range diskInfo {
capacity += disk.TotalSpace
}
return
return capacity
}
// GetTotalUsableCapacity gets the total usable capacity in the cluster.
@ -42,7 +42,7 @@ func GetTotalUsableCapacity(diskInfo []madmin.Disk, s StorageInfo) (capacity uin
capacity += disk.TotalSpace
}
}
return
return capacity
}
// GetTotalCapacityFree gets the total capacity free in the cluster.
@ -50,7 +50,7 @@ func GetTotalCapacityFree(diskInfo []madmin.Disk) (capacity uint64) {
for _, d := range diskInfo {
capacity += d.AvailableSpace
}
return
return capacity
}
// GetTotalUsableCapacityFree gets the total usable capacity free in the cluster.
@ -66,5 +66,5 @@ func GetTotalUsableCapacityFree(diskInfo []madmin.Disk, s StorageInfo) (capacity
capacity += disk.AvailableSpace
}
}
return
return capacity
}

View File

@ -375,7 +375,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io
internalLogIf(ctx, err)
}
return
return profilingDataFound
}
// VerifyBinary - asks remote peers to verify the checksum
@ -1180,7 +1180,7 @@ func (sys *NotificationSys) GetPeerOnlineCount() (nodesOnline, nodesOffline int)
nodesOffline++
}
}
return
return nodesOnline, nodesOffline
}
// NewNotificationSys - creates new notification system object.

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/minio/minio/internal/bucket/replication"
"github.com/tinylib/msgp/msgp"
@ -1120,14 +1120,12 @@ func (z *ListPartsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.UserDefined == nil {
z.UserDefined = make(map[string]string, zb0003)
} else if len(z.UserDefined) > 0 {
for key := range z.UserDefined {
delete(z.UserDefined, key)
}
clear(z.UserDefined)
}
for zb0003 > 0 {
var za0002 string
var za0003 string
zb0003--
var za0002 string
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UserDefined")
@ -1259,14 +1257,12 @@ func (z *MultipartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.UserDefined == nil {
z.UserDefined = make(map[string]string, zb0002)
} else if len(z.UserDefined) > 0 {
for key := range z.UserDefined {
delete(z.UserDefined, key)
}
clear(z.UserDefined)
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UserDefined")
@ -1665,14 +1661,12 @@ func (z *ObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.UserDefined == nil {
z.UserDefined = make(map[string]string, zb0002)
} else if len(z.UserDefined) > 0 {
for key := range z.UserDefined {
delete(z.UserDefined, key)
}
clear(z.UserDefined)
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UserDefined")
@ -2223,14 +2217,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.TargetStatuses == nil {
z.TargetStatuses = make(map[string]replication.StatusType, zb0002)
} else if len(z.TargetStatuses) > 0 {
for key := range z.TargetStatuses {
delete(z.TargetStatuses, key)
}
clear(z.TargetStatuses)
}
for zb0002 > 0 {
var za0001 string
var za0002 replication.StatusType
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TargetStatuses")
@ -2253,14 +2245,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.TargetPurgeStatuses == nil {
z.TargetPurgeStatuses = make(map[string]VersionPurgeStatusType, zb0003)
} else if len(z.TargetPurgeStatuses) > 0 {
for key := range z.TargetPurgeStatuses {
delete(z.TargetPurgeStatuses, key)
}
clear(z.TargetPurgeStatuses)
}
for zb0003 > 0 {
var za0003 string
var za0004 VersionPurgeStatusType
zb0003--
var za0003 string
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TargetPurgeStatuses")

View File

@ -225,11 +225,11 @@ func (o *ObjectOptions) SetDeleteReplicationState(dsc ReplicateDecision, vID str
func (o *ObjectOptions) PutReplicationState() (r ReplicationState) {
rstatus, ok := o.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus]
if !ok {
return
return r
}
r.ReplicationStatusInternal = rstatus
r.Targets = replicationStatusesMap(rstatus)
return
return r
}
// SetEvalMetadataFn sets the metadata evaluation function

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)

View File

@ -50,33 +50,33 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str
if crypto.SSECopy.IsRequested(header) {
clientKey, err = crypto.SSECopy.ParseHTTP(header)
if err != nil {
return
return opts, err
}
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return
return opts, err
}
opts.ServerSideEncryption = encrypt.SSECopy(sse)
return
return opts, err
}
return
return opts, err
}
if crypto.SSEC.IsRequested(header) {
clientKey, err = crypto.SSEC.ParseHTTP(header)
if err != nil {
return
return opts, err
}
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return
return opts, err
}
opts.ServerSideEncryption = sse
return
return opts, err
}
if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) {
opts.ServerSideEncryption = encrypt.NewSSE()
}
return
return opts, err
}
// get ObjectOptions for GET calls from encryption headers
@ -173,7 +173,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = toAPIError(ctx, vErr)
}
valid = false
return
return opts, valid
}
opts.MaxParts, err = parseIntHeader(bucket, object, r.Header, xhttp.AmzMaxParts)
@ -181,7 +181,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = toAPIError(ctx, err)
argumentName = strings.ToLower(xhttp.AmzMaxParts)
valid = false
return
return opts, valid
}
if opts.MaxParts == 0 {
@ -193,7 +193,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = toAPIError(ctx, err)
argumentName = strings.ToLower(xhttp.AmzPartNumberMarker)
valid = false
return
return opts, valid
}
opts.ObjectAttributes = parseObjectAttributes(r.Header)
@ -201,7 +201,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
apiErr = errorCodes.ToAPIErr(ErrInvalidAttributeName)
argumentName = strings.ToLower(xhttp.AmzObjectAttributes)
valid = false
return
return opts, valid
}
for tag := range opts.ObjectAttributes {
@ -216,11 +216,11 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r
argumentName = strings.ToLower(xhttp.AmzObjectAttributes)
argumentValue = tag
valid = false
return
return opts, valid
}
}
return
return opts, valid
}
func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) {
@ -233,13 +233,13 @@ func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) {
}
}
return
return attributes
}
func parseIntHeader(bucket, object string, h http.Header, headerName string) (value int, err error) {
stringInt := strings.TrimSpace(h.Get(headerName))
if stringInt == "" {
return
return value, err
}
value, err = strconv.Atoi(stringInt)
if err != nil {
@ -249,7 +249,7 @@ func parseIntHeader(bucket, object string, h http.Header, headerName string) (va
Err: fmt.Errorf("Unable to parse %s, value should be an integer", headerName),
}
}
return
return value, err
}
func parseBoolHeader(bucket, object string, h http.Header, headerName string) (bool, error) {

View File

@ -2662,7 +2662,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
return err
}
}
return
return err
})
deleteObject := objectAPI.DeleteObject

View File

@ -245,7 +245,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke
for _, l := range oi.partLengths {
sum += l
}
return
return sum
}
// set of inputs for uploading the objects before tests for
@ -677,7 +677,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
for _, l := range oi.partLengths {
sum += l
}
return
return sum
}
// set of inputs for uploading the objects before tests for

View File

@ -136,7 +136,7 @@ func parseDirEnt(buf []byte) (consumed int, name []byte, typ os.FileMode, err er
}
consumed = int(dirent.Reclen)
if direntInode(dirent) == 0 { // File absent in directory.
return
return consumed, name, typ, err
}
switch dirent.Type {
case syscall.DT_REG:
@ -349,7 +349,7 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er
entries = append(entries, nameStr)
}
return
return entries, err
}
func globalSync() {

View File

@ -265,7 +265,7 @@ func (client *peerRESTClient) StartProfiling(ctx context.Context, profiler strin
func (client *peerRESTClient) DownloadProfileData(ctx context.Context) (data map[string][]byte, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodDownloadProfilingData, nil, nil, -1)
if err != nil {
return
return data, err
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&data)

View File

@ -146,7 +146,7 @@ func (s *peerRESTServer) DeletePolicyHandler(mss *grid.MSS) (np grid.NoPayload,
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// LoadPolicyHandler - reloads a policy on the server.
@ -165,7 +165,7 @@ func (s *peerRESTServer) LoadPolicyHandler(mss *grid.MSS) (np grid.NoPayload, ne
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// LoadPolicyMappingHandler - reloads a policy mapping on the server.
@ -189,7 +189,7 @@ func (s *peerRESTServer) LoadPolicyMappingHandler(mss *grid.MSS) (np grid.NoPayl
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// DeleteServiceAccountHandler - deletes a service account on the server.
@ -208,7 +208,7 @@ func (s *peerRESTServer) DeleteServiceAccountHandler(mss *grid.MSS) (np grid.NoP
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// LoadServiceAccountHandler - reloads a service account on the server.
@ -227,7 +227,7 @@ func (s *peerRESTServer) LoadServiceAccountHandler(mss *grid.MSS) (np grid.NoPay
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// DeleteUserHandler - deletes a user on the server.
@ -246,7 +246,7 @@ func (s *peerRESTServer) DeleteUserHandler(mss *grid.MSS) (np grid.NoPayload, ne
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// LoadUserHandler - reloads a user on the server.
@ -275,7 +275,7 @@ func (s *peerRESTServer) LoadUserHandler(mss *grid.MSS) (np grid.NoPayload, nerr
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// LoadGroupHandler - reloads group along with members list.
@ -295,7 +295,7 @@ func (s *peerRESTServer) LoadGroupHandler(mss *grid.MSS) (np grid.NoPayload, ner
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
// StartProfilingHandler - Issues the start profiling command.
@ -479,7 +479,7 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoP
if localMetacacheMgr != nil {
localMetacacheMgr.deleteBucketCache(bucketName)
}
return
return np, nerr
}
// GetAllBucketStatsHandler - fetches bucket replication stats for all buckets from this peer.
@ -554,7 +554,7 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPay
globalBucketTargetSys.UpdateAllTargets(bucketName, meta.bucketTargetConfig)
}
return
return np, nerr
}
func (s *peerRESTServer) GetMetacacheListingHandler(opts *listPathOptions) (*metacache, *grid.RemoteErr) {
@ -885,7 +885,7 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g
}
peersLogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI))
return
return np, nerr
}
func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -896,14 +896,14 @@ func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload
pools, ok := objAPI.(*erasureServerPools)
if !ok {
return
return np, nerr
}
if err := pools.ReloadPoolMeta(context.Background()); err != nil {
return np, grid.NewRemoteErr(err)
}
return
return np, nerr
}
func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -914,7 +914,7 @@ func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload,
pools, ok := objAPI.(*erasureServerPools)
if !ok {
return
return np, nerr
}
// No need to return errors, this is not a highly strict operation.
@ -923,7 +923,7 @@ func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload,
pools.ClearUploadID(uploadID)
}
return
return np, nerr
}
func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -938,7 +938,7 @@ func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload,
}
pools.StopRebalance()
return
return np, nerr
}
func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -965,7 +965,7 @@ func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayl
go pools.StartRebalance()
}
return
return np, nerr
}
func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
@ -981,7 +981,7 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid
}
}()
return
return np, nerr
}
// ConsoleLogHandler sends console logs of this node back to peer rest client

View File

@ -422,7 +422,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
cli, err := globalSiteReplicationSys.getAdminClient(ctx, deploymentID)
if err != nil {
result.Error = err.Error()
return
return result
}
rp := cli.GetEndpointURL()
reqURL := &url.URL{
@ -434,7 +434,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
req, err := http.NewRequestWithContext(ctx, http.MethodPost, reqURL.String(), reader)
if err != nil {
result.Error = err.Error()
return
return result
}
client := &http.Client{
Transport: globalRemoteTargetTransport,
@ -442,7 +442,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
resp, err := client.Do(req)
if err != nil {
result.Error = err.Error()
return
return result
}
defer xhttp.DrainBody(resp.Body)
err = gob.NewDecoder(resp.Body).Decode(&result)
@ -451,5 +451,5 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io
if err != nil {
result.Error = err.Error()
}
return
return result
}

View File

@ -344,7 +344,7 @@ func validateClientKeyIsTrusted(c ssh.ConnMetadata, clientKey ssh.PublicKey) (er
}
_, err = checker.Authenticate(c, clientKey)
return
return err
}
type sftpLogger struct{}

View File

@ -1,7 +1,7 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
package cmd
import (
"github.com/tinylib/msgp/msgp"
)
@ -209,19 +209,17 @@ func (z *RTimedMetrics) DecodeMsg(dc *msgp.Reader) (err error) {
if z.ErrCounts == nil {
z.ErrCounts = make(map[string]int, zb0003)
} else if len(z.ErrCounts) > 0 {
for key := range z.ErrCounts {
delete(z.ErrCounts, key)
}
clear(z.ErrCounts)
}
for zb0003 > 0 {
zb0003--
var za0001 string
var za0002 int
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
return
}
var za0002 int
za0002, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "ErrCounts", za0001)
@ -426,14 +424,12 @@ func (z *RTimedMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.ErrCounts == nil {
z.ErrCounts = make(map[string]int, zb0003)
} else if len(z.ErrCounts) > 0 {
for key := range z.ErrCounts {
delete(z.ErrCounts, key)
}
clear(z.ErrCounts)
}
for zb0003 > 0 {
var za0001 string
var za0002 int
zb0003--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ErrCounts")
@ -839,19 +835,17 @@ func (z *SRMetricsSummary) DecodeMsg(dc *msgp.Reader) (err error) {
if z.Metrics == nil {
z.Metrics = make(map[string]SRMetric, zb0002)
} else if len(z.Metrics) > 0 {
for key := range z.Metrics {
delete(z.Metrics, key)
}
clear(z.Metrics)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 SRMetric
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Metrics")
return
}
var za0002 SRMetric
err = za0002.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Metrics", za0001)
@ -1070,14 +1064,12 @@ func (z *SRMetricsSummary) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.Metrics == nil {
z.Metrics = make(map[string]SRMetric, zb0002)
} else if len(z.Metrics) > 0 {
for key := range z.Metrics {
delete(z.Metrics, key)
}
clear(z.Metrics)
}
for zb0002 > 0 {
var za0001 string
var za0002 SRMetric
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Metrics")
@ -1161,19 +1153,17 @@ func (z *SRStats) DecodeMsg(dc *msgp.Reader) (err error) {
if z.M == nil {
z.M = make(map[string]*SRStatus, zb0002)
} else if len(z.M) > 0 {
for key := range z.M {
delete(z.M, key)
}
clear(z.M)
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 *SRStatus
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "M")
return
}
var za0002 *SRStatus
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
@ -1327,14 +1317,12 @@ func (z *SRStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
if z.M == nil {
z.M = make(map[string]*SRStatus, zb0002)
} else if len(z.M) > 0 {
for key := range z.M {
delete(z.M, key)
}
clear(z.M)
}
for zb0002 > 0 {
var za0001 string
var za0002 *SRStatus
zb0002--
var za0001 string
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "M")

Some files were not shown because too many files have changed in this diff Show More