mirror of https://github.com/minio/minio.git
Compare commits: b3001fd7c0 ... 5d60407b7b (41 commits)
Commits (newest first; author and date columns were not captured):

5d60407b7b, 456d9462e5, 756f3c8142, 7a80ec1cce, ae71d76901, 07c3a429bf,
0cde982902, d0f50cdd9b, da532ab93d, 558fc1c09c, 9fdbf6fe83, 5c87d4ae87,
f0b91e5504, 3b7cb6512c, 4ea6f3b06b, 86d9d9b55e, 5a35585acd, 0848e69602,
02ba581ecf, b44b2a090c, c7d6a9722d, a8abdc797e, 0638ccc5f3, b1a34fd63f,
ffcfa36b13, 376fbd11a7, c76f209ccc, 7a6a2256b1, d002beaee3, 71f293d9ab,
e3d183b6a4, 752abc2e2c, b9f0e8c712, 7ced9663e6, 50fcf9b670, 64f5c6103f,
e909be6380, 83b2ad418b, 7a64bb9766, 34679befef, 4021d8c8e2
README.md (34 changed lines)
@@ -20,7 +20,7 @@ Use the following commands to run a standalone MinIO server as a container.
 
 Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication
 require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically,
-with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
+with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
 for more complete documentation.
 
 ### Stable
@@ -38,7 +38,7 @@ root credentials. You can use the Browser to create buckets, upload objects, and
 
 You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See
 [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
-see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
+see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
 
 > [!NOTE]
 > To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option.
@@ -48,7 +48,7 @@ see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinI
 
 Use the following commands to run a standalone MinIO server on macOS.
 
-Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
+Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.
 
 ### Homebrew (recommended)
 
@@ -69,7 +69,7 @@ brew install minio/stable/minio
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
 
-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html/> to view MinIO SDKs for supported languages.
 
 ### Binary Download
 
@@ -83,7 +83,7 @@ chmod +x minio
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
 
-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
 
 ## GNU/Linux
 
@@ -105,10 +105,10 @@ The following table lists supported architectures. Replace the `wget` URL with t
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
 
-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
 
 > [!NOTE]
-> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
+> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.
 
 ## Microsoft Windows
 
@@ -126,10 +126,10 @@ minio.exe server D:\
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
 
-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
 
 > [!NOTE]
-> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
+> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.
 
 ## Install from Source
 
@@ -141,10 +141,10 @@ go install github.com/minio/minio@latest
 
 The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to <http://127.0.0.1:9000> and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server.
 
-You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
+You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
 
 > [!NOTE]
-> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
+> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) for more complete documentation.
 
 MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.
 
@@ -229,7 +229,7 @@ For example, consider a MinIO deployment behind a proxy `https://minio.example.n
 
 ## Test using MinIO Client `mc`
 
-`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions.
+`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) for further instructions.
 
 ## Upgrading MinIO
 
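For quick orientation, the basic `mc` flow that the quickstart covers looks like the sketch below. The alias name `myminio` and the default `minioadmin:minioadmin` credentials are illustrative placeholders, not part of this diff:

```sh
# Register the server under an alias, then exercise a few basic commands.
mc alias set myminio http://127.0.0.1:9000 minioadmin minioadmin
mc mb myminio/mybucket               # create a bucket
mc cp ./file.txt myminio/mybucket    # upload an object
mc ls myminio/mybucket               # list objects
```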
@@ -238,7 +238,7 @@ Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all tr
 > [!NOTE]
 > requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>
 
-- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)
+- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin/mc-admin-update.html)
 
 ```sh
 mc admin update <minio alias, e.g., myminio>
@@ -258,10 +258,10 @@ mc admin update <minio alias, e.g., myminio>
 
 ## Explore Further
 
-- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html)
-- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
-- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
-- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
+- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
+- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html)
+- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html)
+- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html)
 
 ## Contribute to MinIO Project
 
@@ -445,8 +445,10 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
 	for _, svc := range serviceAccounts {
 		expiryTime := svc.Expiration
 		serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
-			AccessKey:  svc.AccessKey,
-			Expiration: &expiryTime,
+			AccessKey:   svc.AccessKey,
+			Expiration:  &expiryTime,
+			Name:        svc.Name,
+			Description: svc.Description,
 		})
 	}
 	for _, sts := range stsKeys {
@@ -625,8 +627,10 @@ func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.
 	}
 	for _, svc := range serviceAccounts {
 		accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
-			AccessKey:  svc.AccessKey,
-			Expiration: &svc.Expiration,
+			AccessKey:   svc.AccessKey,
+			Expiration:  &svc.Expiration,
+			Name:        svc.Name,
+			Description: svc.Description,
 		})
 	}
 	// if only service accounts, skip if user has no service accounts
@@ -173,6 +173,8 @@ func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *htt
 			if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok {
 				continue // skip if no roleArn and no policy claim
 			}
+			// claim-based provider is in the roleArnMap under dummy ARN
+			arn = dummyRoleARN
 		}
 		matchingCfgName, ok := roleArnMap[arn]
 		if !ok {
@@ -61,7 +61,7 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
-	if z.IsRebalanceStarted() {
+	if z.IsRebalanceStarted(ctx) {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
 		return
 	}
@@ -277,7 +277,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	if pools.IsRebalanceStarted() {
+	if pools.IsRebalanceStarted(ctx) {
 		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL)
 		return
 	}
@@ -304,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
 	}
 }
 
-func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
+func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error {
 	data, err := io.ReadAll(body)
 	if err != nil {
 		return SRError{
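A recurring change across this commit range swaps `interface{}` for `any`. Since Go 1.18, `any` is a predeclared alias for `interface{}`, so the rewrite is purely cosmetic; a minimal self-contained sketch:

```go
package main

import "fmt"

// describe accepts any value; `any` is identical to `interface{}`,
// so callers and type assertions are unaffected by the rename.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var old interface{} = 42
	var cur any = old // same underlying type, direct assignment
	fmt.Println(describe(cur)) // int: 42
}
```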
@@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 
 	// Create a policy policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
   ]
  }
 ]
-}`, bucket))
+}`, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
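The `[]byte(fmt.Sprintf(...))` to `fmt.Appendf(nil, ...)` rewrites that repeat through these test files use `fmt.Appendf` (Go 1.19), which formats straight into a byte slice and skips the intermediate string. A small sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	bucket := "mybucket"

	// Old: format to a string, then copy it into a fresh []byte.
	oldBytes := []byte(fmt.Sprintf(`{"Resource": "arn:aws:s3:::%s/*"}`, bucket))

	// New (Go 1.19+): append formatted output to a byte slice.
	// With nil as the destination, exactly one slice is allocated.
	newBytes := fmt.Appendf(nil, `{"Resource": "arn:aws:s3:::%s/*"}`, bucket)

	fmt.Println(bytes.Equal(oldBytes, newBytes)) // true
}
```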
@@ -113,7 +113,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 	userCount := 50
 	accessKeys := make([]string, userCount)
 	secretKeys := make([]string, userCount)
-	for i := 0; i < userCount; i++ {
+	for i := range userCount {
 		accessKey, secretKey := mustGenerateCredentials(c)
 		err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled)
 		if err != nil {
@@ -133,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 	}
 
 	g := errgroup.Group{}
-	for i := 0; i < userCount; i++ {
+	for i := range userCount {
 		g.Go(func(i int) func() error {
 			return func() error {
 				uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "")
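The loop rewrites above use Go 1.22's range-over-int: `for i := range n` iterates `i = 0 … n-1`, and the index can be dropped entirely when unused. A runnable sketch:

```go
package main

import "fmt"

func main() {
	userCount := 3

	// Classic counted loop.
	for i := 0; i < userCount; i++ {
		fmt.Println("classic:", i)
	}

	// Go 1.22+: equivalent range-over-int form.
	for i := range userCount {
		fmt.Println("range:", i)
	}

	// Index unused: drop it, as in the `for range 5 {` hunk further down.
	for range userCount {
		fmt.Println("tick")
	}
}
```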
@@ -24,6 +24,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"os"
 	"slices"
@@ -157,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
 	}
-	for k, v := range ldapUsers {
-		allCredentials[k] = v
-	}
+	maps.Copy(allCredentials, ldapUsers)
 
 	// Marshal the response
 	data, err := json.Marshal(allCredentials)
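`maps.Copy` (Go 1.21) is the replacement used for the hand-written map-copy loops throughout this commit range: it inserts every key/value pair of the source map into the destination, overwriting existing keys. A minimal sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	allCredentials := map[string]string{"alice": "a1"}
	ldapUsers := map[string]string{"bob": "b2", "carol": "c3"}

	// Equivalent to:
	//   for k, v := range ldapUsers { allCredentials[k] = v }
	// Note: it adds to dst without clearing it first.
	maps.Copy(allCredentials, ldapUsers)

	fmt.Println(len(allCredentials)) // 3
}
```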
@@ -2949,7 +2948,7 @@ func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.
 		name:        createReq.Name,
 		description: description,
 		expiration:  createReq.Expiration,
-		claims:      make(map[string]interface{}),
+		claims:      make(map[string]any),
 	}
 
 	condValues := getConditionValues(r, "", cred)
@@ -332,7 +332,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
 
 	// 2.2 create and associate policy to user
 	policy := "mypolicy-test-user-update"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -355,7 +355,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -562,7 +562,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 
 	// 1. Create a policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -585,7 +585,7 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -680,7 +680,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
 		c.Fatalf("bucket creat error: %v", err)
 	}
 
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -703,7 +703,7 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 
 	// Check that default policies can be overwritten.
 	err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes)
@@ -739,7 +739,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 	}
 
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -762,7 +762,7 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -911,7 +911,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
 
 	// Create policy, user and associate policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -934,7 +934,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -995,7 +995,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
 
 	// Create policy, user and associate policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -1026,7 +1026,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -1093,7 +1093,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
 
 	// Create policy, user and associate policy
 	policy := "mypolicy"
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -1116,7 +1116,7 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
   ]
  }
 ]
-}`, bucket, bucket))
+}`, bucket, bucket)
 	err = s.adm.AddCannedPolicy(ctx, policy, policyBytes)
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
@@ -1367,7 +1367,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
 	svcAK, svcSK := mustGenerateCredentials(c)
 
 	// This policy does not allow listing objects.
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -1381,7 +1381,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) {
   ]
  }
 ]
-}`, bucket))
+}`, bucket)
 	cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
 		Policy:     policyBytes,
 		TargetUser: accessKey,
@@ -1558,7 +1558,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s
 func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string {
 	c.Helper()
 	versions := []string{}
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{})
 		if err != nil {
 			c.Fatalf("upload did not succeed got %#v", err)
@@ -1627,7 +1627,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 	svcAK, svcSK := mustGenerateCredentials(c)
 
 	// This policy does not allow listing objects.
-	policyBytes := []byte(fmt.Sprintf(`{
+	policyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -1641,7 +1641,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
   ]
  }
 ]
-}`, bucket))
+}`, bucket)
 	cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{
 		Policy:     policyBytes,
 		TargetUser: accessKey,
@@ -1655,7 +1655,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
 	c.mustNotListObjects(ctx, svcClient, bucket)
 
 	// This policy allows listing objects.
-	newPolicyBytes := []byte(fmt.Sprintf(`{
+	newPolicyBytes := fmt.Appendf(nil, `{
 "Version": "2012-10-17",
 "Statement": [
  {
@@ -1668,7 +1668,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit
   ]
  }
 ]
-}`, bucket))
+}`, bucket)
 	err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{
 		NewPolicy: newPolicyBytes,
 	})
@@ -954,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ
 
 	var args dsync.LockArgs
 	var lockers []dsync.NetLocker
-	for _, path := range strings.Split(vars["paths"], ",") {
+	for path := range strings.SplitSeq(vars["paths"], ",") {
 		if path == "" {
 			continue
 		}
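`strings.SplitSeq` (Go 1.24) returns an iterator (`iter.Seq[string]`) over the substrings instead of allocating a `[]string`, which is why the range clause above changes from two variables to one. A sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	paths := "bucket/a,,bucket/b"

	// strings.Split allocates the whole []string up front.
	parts := strings.Split(paths, ",")
	fmt.Println(len(parts)) // 3 (empty fields included)

	// strings.SplitSeq yields the same substrings lazily; note the
	// single-variable range, since the iterator yields values only.
	for p := range strings.SplitSeq(paths, ",") {
		if p == "" {
			continue // empty fields still appear, as with Split
		}
		fmt.Println(p)
	}
}
```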
@@ -1193,7 +1193,7 @@ type dummyFileInfo struct {
 	mode    os.FileMode
 	modTime time.Time
 	isDir   bool
-	sys     interface{}
+	sys     any
 }
 
 func (f dummyFileInfo) Name() string { return f.name }
@@ -1201,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size }
 func (f dummyFileInfo) Mode() os.FileMode  { return f.mode }
 func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
 func (f dummyFileInfo) IsDir() bool        { return f.isDir }
-func (f dummyFileInfo) Sys() interface{}   { return f.sys }
+func (f dummyFileInfo) Sys() any           { return f.sys }
 
 // DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
 // ----------
@@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool {
 func TestTopLockEntries(t *testing.T) {
 	locksHeld := make(map[string][]lockRequesterInfo)
 	var owners []string
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		owners = append(owners, fmt.Sprintf("node-%d", i))
 	}
 
@@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) {
 	// request UID, but 10 different resource names associated with it.
 	var lris []lockRequesterInfo
 	uuid := mustGetUUID()
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		resource := fmt.Sprintf("bucket/delete-object-%d", i)
 		lri := lockRequesterInfo{
 			Name: resource,
@@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) {
 	}
 
 	// Add a few concurrent read locks to the mix
-	for i := 0; i < 50; i++ {
+	for i := range 50 {
 		resource := fmt.Sprintf("bucket/get-object-%d", i)
 		lri := lockRequesterInfo{
 			Name: resource,
@@ -22,6 +22,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"net/http"
 	"sort"
 	"sync"
@@ -520,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
 
 	// Make a copy before returning the value
 	retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
-	for k, v := range h.scannedItemsMap {
-		retMap[k] = v
-	}
+	maps.Copy(retMap, h.scannedItemsMap)
 
 	return retMap
 }
@@ -534,9 +533,7 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
 
 	// Make a copy before returning the value
 	retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
-	for k, v := range h.healedItemsMap {
-		retMap[k] = v
-	}
+	maps.Copy(retMap, h.healedItemsMap)
 
 	return retMap
 }
@@ -549,9 +546,7 @@ func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
 
 	// Make a copy before returning the value
 	retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
-	for k, v := range h.healFailedItemsMap {
-		retMap[k] = v
-	}
+	maps.Copy(retMap, h.healFailedItemsMap)
 
 	return retMap
 }
@@ -65,7 +65,7 @@ func setCommonHeaders(w http.ResponseWriter) {
 }
 
 // Encodes the response headers into XML format.
-func encodeResponse(response interface{}) []byte {
+func encodeResponse(response any) []byte {
 	var buf bytes.Buffer
 	buf.WriteString(xml.Header)
 	if err := xml.NewEncoder(&buf).Encode(response); err != nil {
@@ -83,7 +83,7 @@ func encodeResponse(response interface{}) []byte {
 // Do not use this function for anything other than ListObjects()
 // variants, please open a github discussion if you wish to use
 // this in other places.
-func encodeResponseList(response interface{}) []byte {
+func encodeResponseList(response any) []byte {
 	var buf bytes.Buffer
 	buf.WriteString(xxml.Header)
 	if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
@@ -94,7 +94,7 @@ func encodeResponseList(response interface{}) []byte {
 }
 
 // Encodes the response headers into JSON format.
-func encodeResponseJSON(response interface{}) []byte {
+func encodeResponseJSON(response any) []byte {
 	var bytesBuffer bytes.Buffer
 	e := json.NewEncoder(&bytesBuffer)
 	e.Encode(response)
@@ -100,7 +100,6 @@ func TestObjectLocation(t *testing.T) {
 		},
 	}
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
 			if testCase.expectedLocation != gotLocation {
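The deleted `testCase := testCase` line here (and in the config tests further down) is a cleanup enabled by Go 1.22, which gives each loop iteration a fresh variable; the per-iteration copy that closures previously required is now redundant. An illustrative sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, tc := range []string{"a", "b", "c"} {
		// Before Go 1.22 this rebinding was needed so each goroutine saw
		// its own value rather than the shared loop variable:
		//     tc := tc
		// Since Go 1.22 it is a no-op and can simply be deleted.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(tc)
		}()
	}
	wg.Wait()
}
```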
@@ -216,7 +216,7 @@ func getSessionToken(r *http.Request) (token string) {
 
 // Fetch claims in the security token returned by the client, doesn't return
 // errors - upon errors the returned claims map will be empty.
-func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
+func mustGetClaimsFromToken(r *http.Request) map[string]any {
 	claims, _ := getClaimsFromToken(getSessionToken(r))
 	return claims
 }
@@ -266,7 +266,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error)
 }
 
 // Fetch claims in the security token returned by the client.
-func getClaimsFromToken(token string) (map[string]interface{}, error) {
+func getClaimsFromToken(token string) (map[string]any, error) {
 	jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey)
 	if err != nil {
 		return nil, err
@@ -275,7 +275,7 @@ func getClaimsFromToken(token string) (map[string]interface{}, error) {
 }
 
 // Fetch claims in the security token returned by the client and validate the token.
-func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
+func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) {
 	token := getSessionToken(r)
 	if token != "" && cred.AccessKey == "" {
 		// x-amz-security-token is not allowed for anonymous access.
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -269,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error {
 func (h *healingTracker) isHealed(bucket string) bool {
 	h.mu.RLock()
 	defer h.mu.RUnlock()
-	for _, v := range h.HealedBuckets {
-		if v == bucket {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(h.HealedBuckets, bucket)
}
 
 // resume will reset progress to the numbers at the start of the bucket.
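`slices.Contains` (Go 1.21) collapses the five-line membership loop removed above into a single call; a sketch:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	healedBuckets := []string{"photos", "logs", "backups"}

	// Replaces:
	//   for _, v := range healedBuckets {
	//       if v == "logs" { return true }
	//   }
	//   return false
	fmt.Println(slices.Contains(healedBuckets, "logs"))    // true
	fmt.Println(slices.Contains(healedBuckets, "missing")) // false
}
```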
@@ -25,6 +25,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net/http"
 	"net/url"
@@ -574,9 +575,7 @@ func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
 		oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass
 	}
 
-	for k, v := range objInfo.UserMetadata {
-		oi.UserDefined[k] = v
-	}
+	maps.Copy(oi.UserDefined, objInfo.UserMetadata)
 
 	return oi
 }
@@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error {
 type BatchJobSize int64
 
 // UnmarshalYAML to parse humanized byte values
-func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error {
 	var batchExpireSz string
 	err := unmarshal(&batchExpireSz)
 	if err != nil {
@@ -21,6 +21,7 @@ import (
 	"context"
 	"encoding/base64"
 	"fmt"
+	"maps"
 	"math/rand"
 	"net/http"
 	"runtime"
@@ -110,9 +111,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
 		}
 	}
 	e.kmsContext = kms.Context{}
-	for k, v := range ctx {
-		e.kmsContext[k] = v
-	}
+	maps.Copy(e.kmsContext, ctx)
 	ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
 	if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
 		return err
@@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
 	// Since we are rotating the keys, make sure to update the metadata.
 	oi.metadataOnly = true
 	oi.keyRotation = true
-	for k, v := range encMetadata {
-		oi.UserDefined[k] = v
-	}
+	maps.Copy(oi.UserDefined, encMetadata)
 	if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{
 		VersionID: oi.VersionID,
 	}, ObjectOptions{
@@ -51,8 +51,8 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 	b.ReportAllocs()
-	// the actual benchmark for PutObject starts here. Reset the benchmark timer.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		// insert the object.
 		objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i),
 			mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
@@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
 	b.ReportAllocs()
-	// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		// insert the object.
 		totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
-		for j := 0; j < totalPartsNR; j++ {
+		for j := range totalPartsNR {
 			if j < totalPartsNR-1 {
 				textPartData = textData[j*partSize : (j+1)*partSize-1]
 			} else {
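`testing.B.Loop` (Go 1.24) drives the benchmark loop itself: setup before the loop is excluded from timing (so the explicit `b.ResetTimer()` goes away) and the loop body is kept alive rather than optimized out. A counter can still be threaded through, as the diffs above do with `for i := 0; b.Loop(); i++`. A sketch:

```go
package main_test

import (
	"strconv"
	"testing"
)

func BenchmarkItoa(b *testing.B) {
	data := make([]string, 0, 1024) // setup, excluded from the measured time
	_ = data
	// Old pattern: b.ResetTimer(); for i := 0; i < b.N; i++ { ... }
	for i := 0; b.Loop(); i++ {
		_ = strconv.Itoa(i)
	}
}
```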
@@ -154,7 +154,6 @@ func initFederatorBackend(buckets []string, objLayer ObjectLayer) {
 	g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50)
 
 	for index := range bucketsToBeUpdatedSlice {
-		index := index
 		g.Go(func() error {
 			return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index])
 		}, index)
@@ -1387,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	// Set the correct hex md5sum for the fan-out stream.
 	fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil))
 
-	concurrentSize := 100
-	if runtime.GOMAXPROCS(0) < concurrentSize {
-		concurrentSize = runtime.GOMAXPROCS(0)
-	}
+	concurrentSize := min(runtime.GOMAXPROCS(0), 100)
 
 	fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries))
 	eventArgsList := make([]eventArgs, 0, len(fanOutEntries))
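The clamp rewrites in this and later hunks use the built-in `min` and `max` functions added in Go 1.21 for ordered types; a sketch:

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	// Replaces the three-line if-clamp removed above.
	concurrentSize := min(runtime.GOMAXPROCS(0), 100)
	fmt.Println(concurrentSize)

	// The callhome/resync hunks below use max the same way to
	// enforce a one-second floor on a randomized sleep.
	sleep := max(500*time.Millisecond, time.Second)
	fmt.Println(sleep) // 1s
}
```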
@@ -1661,9 +1657,11 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
 		return
 	}
 
-	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
-		writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
-		return
+	if s3Error := checkRequestAuthType(ctx, r, policy.HeadBucketAction, bucket, ""); s3Error != ErrNone {
+		if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
+			writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
+			return
+		}
 	}
 
 	getBucketInfo := objectAPI.GetBucketInfo
@@ -657,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 
 	sha256sum := ""
 	var objectNames []string
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		contentBytes := []byte("hello")
 		objectName := "test-object-" + strconv.Itoa(i)
 		if i == 0 {
@@ -687,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 
 	// The following block will create a bucket policy with delete object to 'public/*'. This is
 	// to test a mixed response of a successful & failure while deleting objects in a single request
-	policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName))
+	policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes),
 		credentials.AccessKey, credentials.SecretKey, nil)
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"strconv"
 	"strings"
@@ -959,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
 			UserDefined: meta,
 		}
 	}
-	for k, v := range objInfo.UserDefined {
-		meta[k] = v
-	}
+	maps.Copy(meta, objInfo.UserDefined)
 	if len(objInfo.UserTags) != 0 {
 		meta[xhttp.AmzObjectTagging] = objInfo.UserTags
 	}
@@ -472,7 +472,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
 		return meta, reloaded, nil
 	}
 
-	val, err, _ := sys.group.Do(bucket, func() (val interface{}, err error) {
+	val, err, _ := sys.group.Do(bucket, func() (val any, err error) {
 		meta, err = loadBucketMetadata(ctx, objAPI, bucket)
 		if err != nil {
 			if !sys.Initialized() {
@@ -511,7 +511,6 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []stri
 	g := errgroup.WithNErrs(len(buckets))
 	bucketMetas := make([]BucketMetadata, len(buckets))
 	for index := range buckets {
-		index := index
 		g.Go(func() error {
 			// Sleep and stagger to avoid blocked CPU and thundering
 			// herd upon start up sequence.
@@ -297,6 +297,9 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
 		if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil {
 			return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
 		}
+		if legalHoldPermErr != ErrNone {
+			return mode, retainDate, legalHold, legalHoldPermErr
+		}
 	}
 
 	if retentionRequested {
@@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
 	var wg sync.WaitGroup
 	var mu sync.Mutex
 	wg.Add(n)
-	for i := 0; i < n; i++ {
+	for range n {
 		go func() {
 			defer wg.Done()
 			// Sync start.
@@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// Test case - 1.
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// Expecting StatusBadRequest (400).
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: maxBucketPolicySize + 1,
 			accessKey: credentials.AccessKey,
@@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// Expecting the HTTP response status to be StatusLengthRequired (411).
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: 0,
 			accessKey: credentials.AccessKey,
@@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// checkBucketPolicyResources should fail.
 		{
 			bucketName:         bucketName1,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// should result in 404 StatusNotFound
 		{
 			bucketName:         "non-existent-bucket",
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// should result in 404 StatusNotFound
 		{
 			bucketName:         ".invalid-bucket",
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
 		// should result in 400 StatusBadRequest.
 		{
 			bucketName:         bucketName,
-			bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
+			bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
 
 			policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
 			accessKey: credentials.AccessKey,
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"encoding/json"
+	"maps"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -187,9 +188,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
 	}
 
 	cloneURLValues := make(url.Values, len(r.Form))
-	for k, v := range r.Form {
-		cloneURLValues[k] = v
-	}
+	maps.Copy(cloneURLValues, r.Form)
 
 	for _, objLock := range []string{
 		xhttp.AmzObjectLockMode,
@@ -224,7 +223,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s
 	// Add groups claim which could be a list. This will ensure that the claim
 	// `jwt:groups` works.
 	if grpsVal, ok := claims["groups"]; ok {
-		if grpsIs, ok := grpsVal.([]interface{}); ok {
+		if grpsIs, ok := grpsVal.([]any); ok {
 			grps := []string{}
 			for _, gI := range grpsIs {
 				if g, ok := gI.(string); ok {
@@ -92,7 +92,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
 	}
 	if !quotaCfg.IsValid() {
 		if quotaCfg.Type == "fifo" {
-			internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
+			internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc quota clear alias/bucket' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
 			return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
 		}
 		return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
@@ -21,6 +21,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"maps"
 	"net/http"
 	"net/url"
 	"regexp"
@@ -311,7 +312,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD
 	if len(s) == 0 {
 		return
 	}
-	for _, p := range strings.Split(s, ",") {
+	for p := range strings.SplitSeq(s, ",") {
 		if p == "" {
 			continue
 		}
@@ -735,9 +736,7 @@ type BucketReplicationResyncStatus struct {
 
 func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) {
 	m = make(map[string]TargetReplicationResyncStatus)
-	for arn, st := range rs.TargetsMap {
-		m[arn] = st
-	}
+	maps.Copy(m, rs.TargetsMap)
 	return
 }
 
@@ -24,6 +24,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net/http"
 	"net/url"
@@ -803,9 +804,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
 	} else {
 		cs, mp := getCRCMeta(objInfo, 0, nil)
 		// Set object checksum.
-		for k, v := range cs {
-			meta[k] = v
-		}
+		maps.Copy(meta, cs)
 		isMP = mp
 		if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject {
 			// For objects where checksum is full object, it will be the same.
@@ -969,9 +968,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
 
 	t, _ := tags.ParseObjectTags(oi1.UserTags)
 	oi2Map := make(map[string]string)
-	for k, v := range oi2.UserTags {
-		oi2Map[k] = v
-	}
+	maps.Copy(oi2Map, oi2.UserTags)
 	if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) {
 		return replicateMetadata
 	}
@@ -1770,9 +1767,7 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri
 	}
 	if !copied {
 		dst = make(map[string]string, len(metadata))
-		for k, v := range metadata {
-			dst[k] = v
-		}
+		maps.Copy(dst, metadata)
 		copied = true
 	}
 	delete(dst, key)
@@ -2954,7 +2949,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 	}()
 
 	var wg sync.WaitGroup
-	for i := 0; i < resyncParallelRoutines; i++ {
+	for i := range resyncParallelRoutines {
 		wg.Add(1)
 		workers[i] = make(chan ReplicateObjectInfo, 100)
 		i := i
@@ -3063,7 +3058,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
 			workers[h%uint64(resyncParallelRoutines)] <- roi
 		}
 	}
-	for i := 0; i < resyncParallelRoutines; i++ {
+	for i := range resyncParallelRoutines {
 		xioutil.SafeClose(workers[i])
 	}
 	wg.Wait()
@@ -3193,11 +3188,9 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []stri
 			<-ctx.Done()
 			return
 		}
-		duration := time.Duration(r.Float64() * float64(time.Minute))
-		if duration < time.Second {
+		duration := max(time.Duration(r.Float64()*float64(time.Minute)),
 			// Make sure to sleep at least a second to avoid high CPU ticks.
-			duration = time.Second
-		}
+			time.Second)
 		time.Sleep(duration)
 	}
 }
@@ -3797,14 +3790,13 @@ func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) (cs map[string]string
 	meta := make(map[string]string)
 	cs, isMP = oi.decryptChecksums(partNum, h)
 	for k, v := range cs {
-		cksum := hash.NewChecksumString(k, v)
-		if cksum == nil {
+		if k == xhttp.AmzChecksumType {
 			continue
 		}
-		if cksum.Valid() {
-			meta[cksum.Type.Key()] = v
-			meta[xhttp.AmzChecksumType] = cs[xhttp.AmzChecksumType]
-			meta[xhttp.AmzChecksumAlgo] = cksum.Type.String()
+		cktype := hash.ChecksumStringToType(k)
+		if cktype.IsSet() {
+			meta[cktype.Key()] = v
+			meta[xhttp.AmzChecksumAlgo] = cktype.String()
 		}
 	}
 	return meta, isMP
@@ -19,6 +19,7 @@ package cmd
 
 import (
 	"fmt"
+	"maps"
 	"math"
 	"sync/atomic"
 	"time"
@@ -221,9 +222,7 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) {
 		}
 		if s.Failed.ErrCounts == nil {
 			s.Failed.ErrCounts = make(map[string]int)
-			for k, v := range st.Failed.ErrCounts {
-				s.Failed.ErrCounts[k] = v
-			}
+			maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts)
 		}
 		c.Stats[arn] = &s
 	}
@@ -20,6 +20,7 @@ package cmd
 import (
 	"context"
 	"errors"
+	"maps"
 	"net/url"
 	"sync"
 	"time"
@@ -236,9 +237,7 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth {
 	sys.hMutex.RLock()
 	defer sys.hMutex.RUnlock()
 	m := make(map[string]epHealth, len(sys.hc))
-	for k, v := range sys.hc {
-		m[k] = v
-	}
+	maps.Copy(m, sys.hc)
 	return m
 }
 
@@ -57,11 +57,9 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) {
 
 			// callhome running on a different node.
 			// sleep for some time and try again.
-			duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur()))
-			if duration < time.Second {
+			duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())),
 				// Make sure to sleep at least a second to avoid high CPU ticks.
-				duration = time.Second
-			}
+				time.Second)
 			time.Sleep(duration)
 		}
 	}()
@@ -105,7 +105,7 @@ func init() {
 	gob.Register(madmin.TimeInfo{})
 	gob.Register(madmin.XFSErrorConfigs{})
 	gob.Register(map[string]string{})
-	gob.Register(map[string]interface{}{})
+	gob.Register(map[string]any{})
 
 	// All minio-go and madmin-go API operations shall be performed only once,
 	// another way to look at this is we are turning off retries.
@@ -258,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) {
 
 	if !serverDebugLog {
 		// Disable console logging if server debug log is not enabled
-		noLog := func(string, ...interface{}) {}
+		noLog := func(string, ...any) {}
 
 		consoleapi.LogInfo = noLog
 		consoleapi.LogError = noLog
@@ -761,7 +761,7 @@ func serverHandleEnvVars() {
 
 	domains := env.Get(config.EnvDomain, "")
 	if len(domains) != 0 {
-		for _, domainName := range strings.Split(domains, config.ValueSeparator) {
+		for domainName := range strings.SplitSeq(domains, config.ValueSeparator) {
 			if _, ok := dns2.IsDomainName(domainName); !ok {
 				logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName),
 					"Invalid MINIO_DOMAIN value in environment variable")
@@ -1059,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) {
 	return time.Time{}, false
 }
 
-func (a bgCtx) Value(key interface{}) interface{} {
+func (a bgCtx) Value(key any) any {
 	return a.parent.Value(key)
 }
@@ -43,7 +43,6 @@ func Test_readFromSecret(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 			if err != nil {
@@ -155,7 +154,6 @@ MINIO_ROOT_PASSWORD=minio123`,
 		},
 	}
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			tmpfile, err := os.CreateTemp(t.TempDir(), "testfile")
 			if err != nil {
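The deleted `testCase := testCase` lines above (and the many `index := index`, `i := i` deletions below) are the classic pre-Go-1.22 capture workaround. Since Go 1.22 each loop iteration gets a fresh variable, so closures like `t.Run` bodies or goroutines no longer observe the final value, and the shadow copy is redundant. A sketch of the hazard the copy used to guard against:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, v := range []int{1, 2, 3} {
		// Pre-1.22: all goroutines shared one v, so without a
		// `v := v` shadow copy this could print "3 3 3".
		// With Go 1.22 semantics v is per-iteration, making the
		// copy unnecessary.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(v)
		}()
	}
	wg.Wait()
}
```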
@@ -21,6 +21,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"strings"
 	"sync"
 
@@ -78,12 +79,8 @@ func initHelp() {
 		config.BatchSubSys:   batch.DefaultKVS,
 		config.BrowserSubSys: browser.DefaultKVS,
 	}
-	for k, v := range notify.DefaultNotificationKVS {
-		kvs[k] = v
-	}
-	for k, v := range lambda.DefaultLambdaKVS {
-		kvs[k] = v
-	}
+	maps.Copy(kvs, notify.DefaultNotificationKVS)
+	maps.Copy(kvs, lambda.DefaultLambdaKVS)
 	if globalIsErasure {
 		kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
 		kvs[config.HealSubSys] = heal.DefaultKVS
@@ -355,7 +352,9 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
 		}
 	case config.IdentityOpenIDSubSys:
 		if _, err := openid.LookupConfig(s,
-			NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
+			xhttp.WithUserAgent(NewHTTPTransport(), func() string {
+				return getUserAgent(getMinioMode())
+			}), xhttp.DrainBody, globalSite.Region()); err != nil {
 			return err
 		}
 	case config.IdentityLDAPSubSys:
@@ -38,12 +38,12 @@ import (
 )
 
 // Save config file to corresponding backend
-func Save(configFile string, data interface{}) error {
+func Save(configFile string, data any) error {
 	return quick.SaveConfig(data, configFile, globalEtcdClient)
 }
 
 // Load config from backend
-func Load(configFile string, data interface{}) (quick.Config, error) {
+func Load(configFile string, data any) (quick.Config, error) {
 	return quick.LoadConfig(configFile, globalEtcdClient, data)
 }
 

@@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
 	return saveConfig(ctx, objAPI, historyFile, kv)
 }
 
-func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error {
+func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error {
 	data, err := json.Marshal(cfg)
 	if err != nil {
 		return err
@@ -28,7 +28,7 @@ import (
 	"github.com/minio/madmin-go/v3/logger/log"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/logger/target/console"
-	"github.com/minio/minio/internal/logger/target/types"
+	types "github.com/minio/minio/internal/logger/target/loggertypes"
 	"github.com/minio/minio/internal/pubsub"
 	xnet "github.com/minio/pkg/v3/net"
 )
@@ -101,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
 
 	lastN = make([]log.Info, last)
 	sys.RLock()
-	sys.logBuf.Do(func(p interface{}) {
+	sys.logBuf.Do(func(p any) {
 		if p != nil {
 			lg, ok := p.(log.Info)
 			if ok && lg.SendLog(node, logKind) {
@@ -155,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats {
 // Content returns the console stdout log
 func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {
 	sys.RLock()
-	sys.logBuf.Do(func(p interface{}) {
+	sys.logBuf.Do(func(p any) {
 		if p != nil {
 			lg, ok := p.(log.Info)
 			if ok {
@@ -181,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType {
 
 // Send log message 'e' to console and publish to console
 // log pubsub system
-func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error {
+func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error {
 	var lg log.Info
 	switch e := entry.(type) {
 	case log.Entry:
@@ -198,7 +198,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p
 func (p *scannerMetrics) getCurrentPaths() []string {
 	var res []string
 	prefix := globalLocalNodeName + "/"
-	p.currentPaths.Range(func(key, value interface{}) bool {
+	p.currentPaths.Range(func(key, value any) bool {
 		// We are a bit paranoid, but better miss an entry than crash.
 		name, ok := key.(string)
 		if !ok {
@@ -221,7 +221,7 @@ func (p *scannerMetrics) getCurrentPaths() []string {
 // (since this is concurrent it may not be 100% reliable)
 func (p *scannerMetrics) activeDrives() int {
 	var i int
-	p.currentPaths.Range(func(k, v interface{}) bool {
+	p.currentPaths.Range(func(k, v any) bool {
 		i++
 		return true
 	})
@@ -299,7 +299,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
 	m.CollectedAt = time.Now()
 	m.ActivePaths = p.getCurrentPaths()
 	m.LifeTimeOps = make(map[string]uint64, scannerMetricLast)
-	for i := scannerMetric(0); i < scannerMetricLast; i++ {
+	for i := range scannerMetricLast {
 		if n := atomic.LoadUint64(&p.operations[i]); n > 0 {
 			m.LifeTimeOps[i.String()] = n
 		}
@@ -309,7 +309,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics {
 	}
 
 	m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime)
-	for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ {
+	for i := range scannerMetricLastRealtime {
 		lm := p.lastMinute(i)
 		if lm.N > 0 {
 			m.LastMinute.Actions[i.String()] = lm.asTimedAction()
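Ranging over an integer (Go 1.22) iterates i = 0..n-1, and works for any integer type — including typed constants like `scannerMetricLast` above. When the counter itself is unused it degrades to `for range n`; both spellings appear throughout the rest of this compare. A sketch:

```go
package main

import "fmt"

func main() {
	// Before: for i := 0; i < 3; i++ { ... }
	for i := range 3 {
		fmt.Println("indexed:", i)
	}

	// When the counter is unused:
	for range 3 {
		fmt.Println("tick")
	}
}
```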
@@ -78,11 +78,9 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) {
 		// Run the data scanner in a loop
 		for {
 			runDataScanner(ctx, objAPI)
-			duration := time.Duration(r.Float64() * float64(scannerCycle.Load()))
-			if duration < time.Second {
-				// Make sure to sleep at least a second to avoid high CPU ticks.
-				duration = time.Second
-			}
+			duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())),
+				// Make sure to sleep at least a second to avoid high CPU ticks.
+				time.Second)
 			time.Sleep(duration)
 		}
 	}()
@@ -127,7 +127,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
 		v2 uuid-2 modTime -3m
 		v1 uuid-1 modTime -4m
 	*/
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		fivs[i] = FileInfo{
 			Volume: bucket,
 			Name:   obj,
@@ -22,6 +22,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"net/http"
 	"path"
@@ -99,9 +100,7 @@ func (ats *allTierStats) clone() *allTierStats {
 	}
 	dst := *ats
 	dst.Tiers = make(map[string]tierStats, len(ats.Tiers))
-	for tier, st := range ats.Tiers {
-		dst.Tiers[tier] = st
-	}
+	maps.Copy(dst.Tiers, ats.Tiers)
 	return &dst
 }
 
@@ -347,9 +346,7 @@ func (e dataUsageEntry) clone() dataUsageEntry {
 	// We operate on a copy from the receiver.
 	if e.Children != nil {
 		ch := make(dataUsageHashMap, len(e.Children))
-		for k, v := range e.Children {
-			ch[k] = v
-		}
+		maps.Copy(ch, e.Children)
 		e.Children = ch
 	}
 
@@ -179,7 +179,7 @@ func TestDataUsageUpdate(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Changed dir must be picked up in this many cycles.
-	for i := 0; i < dataUsageUpdateDirCycles; i++ {
+	for range dataUsageUpdateDirCycles {
 		got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
 		got.Info.NextCycle++
 		if err != nil {
@@ -428,7 +428,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Changed dir must be picked up in this many cycles.
-	for i := 0; i < dataUsageUpdateDirCycles; i++ {
+	for range dataUsageUpdateDirCycles {
 		got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep)
 		got.Info.NextCycle++
 		if err != nil {
@@ -526,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
 // generateUsageTestFiles create nFolders * nFiles files of size bytes each.
 func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) {
 	pl := make([]byte, size)
-	for i := 0; i < nFolders; i++ {
+	for i := range nFolders {
 		name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt")
 		err := os.MkdirAll(filepath.Dir(name), os.ModePerm)
 		if err != nil {
 			t.Fatal(err)
 		}
-		for j := 0; j < nFiles; j++ {
+		for j := range nFiles {
 			name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
 			err = os.WriteFile(name, pl, os.ModePerm)
 			if err != nil {
@@ -618,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) {
 }
 
 // equalAsJSON returns whether the values are equal when encoded as JSON.
-func equalAsJSON(a, b interface{}) bool {
+func equalAsJSON(a, b any) bool {
 	aj, err := json.Marshal(a)
 	if err != nil {
 		panic(err)
@@ -129,12 +129,9 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
 
 	if failPct > dynamicTimeoutIncreaseThresholdPct {
 		// We are hitting the timeout too often, so increase the timeout by 25%
-		timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100
-
-		// Set upper cap.
-		if timeout > int64(maxDynamicTimeout) {
-			timeout = int64(maxDynamicTimeout)
-		}
+		timeout := min(
+			// Set upper cap.
+			atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout))
 		// Safety, shouldn't happen
 		if timeout < dt.minimum {
 			timeout = dt.minimum
@@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogFailure()
 	}
 
@@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogFailure()
 	}
 
 	adjusted := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogFailure()
 	}
 
@@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogSuccess(20 * time.Second)
 	}
 
@@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogSuccess(20 * time.Second)
 	}
 
 	adjusted := timeout.Timeout()
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		timeout.LogSuccess(20 * time.Second)
 	}
 
@@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) {
 	initial := timeout.Timeout()
 
 	const successTimeout = 20 * time.Second
-	for l := 0; l < 100; l++ {
-		for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range 100 {
+		for range dynamicTimeoutLogSize {
 			timeout.LogSuccess(successTimeout)
 		}
 	}
@@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) {
 		rng := rand.New(rand.NewSource(int64(i)))
 		go func() {
 			defer wg.Done()
-			for i := 0; i < 100; i++ {
-				for j := 0; j < 100; j++ {
+			for range 100 {
+				for range 100 {
 					timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64()))
 				}
 				to := timeout.Timeout()
@@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
 	initial := timeout.Timeout()
 
 	const successTimeout = 20 * time.Second
-	for l := 0; l < 100; l++ {
-		for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range 100 {
+		for range dynamicTimeoutLogSize {
 			timeout.LogSuccess(successTimeout)
 		}
 	}
@@ -166,13 +166,9 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) {
 func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) {
 	const successTimeout = 20 * time.Second
 
-	for i := 0; i < dynamicTimeoutLogSize; i++ {
+	for range dynamicTimeoutLogSize {
 		rnd := f()
-		duration := time.Duration(float64(successTimeout) * rnd)
-
-		if duration < 100*time.Millisecond {
-			duration = 100 * time.Millisecond
-		}
+		duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond)
 		if duration >= time.Minute {
 			timeout.LogFailure()
 		} else {
@@ -188,7 +184,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for try := 0; try < 10; try++ {
+	for range 10 {
 		testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64)
 	}
 
@@ -205,7 +201,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) {
 
 	initial := timeout.Timeout()
 
-	for try := 0; try < 10; try++ {
+	for range 10 {
 		testDynamicTimeoutAdjust(t, timeout, func() float64 {
 			return 1.0 + rand.NormFloat64()
 		})
@@ -29,6 +29,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"path"
 	"strconv"
@@ -117,10 +118,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
 		names = make([]string, 0, BatchSize)
 	)
 	for len(objects) > 0 {
-		N := BatchSize
-		if len(objects) < BatchSize {
-			N = len(objects)
-		}
+		N := min(len(objects), BatchSize)
 		batch := objects[:N]
 
 		// We have to decrypt only ETags of SSE-S3 single-part
@@ -317,9 +315,7 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
 	// of the client provided context and add the bucket
 	// key, if not present.
 	kmsCtx := kms.Context{}
-	for k, v := range cryptoCtx {
-		kmsCtx[k] = v
-	}
+	maps.Copy(kmsCtx, cryptoCtx)
 	if _, ok := kmsCtx[bucket]; !ok {
 		kmsCtx[bucket] = path.Join(bucket, object)
 	}
@@ -389,9 +385,7 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
 	// of the client provided context and add the bucket
 	// key, if not present.
 	kmsCtx := kms.Context{}
-	for k, v := range cryptoCtx {
-		kmsCtx[k] = v
-	}
+	maps.Copy(kmsCtx, cryptoCtx)
 	if _, ok := kmsCtx[bucket]; !ok {
 		kmsCtx[bucket] = path.Join(bucket, object)
 	}
@@ -384,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) {
 	// Simple useful utilities
 	repeat = func(k int64, n int) []int64 {
 		a := []int64{}
-		for i := 0; i < n; i++ {
+		for range n {
 			a = append(a, k)
 		}
 		return a
@@ -471,10 +471,7 @@ func TestGetDecryptedRange(t *testing.T) {
 			// round up the lbPartOffset
 			// to the end of the
 			// corresponding DARE package
-			lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz
-			if lbPkgEndOffset > v {
-				lbPkgEndOffset = v
-			}
+			lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v)
 			bytesToDrop := v - lbPkgEndOffset
 
 			// Last segment to update `l`
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"net/url"
 	"runtime"
-	"sort"
+	"slices"
 	"strings"
 
 	"github.com/cespare/xxhash/v2"
@@ -122,9 +122,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar
 	// eyes that we prefer a sorted setCount slice for the
 	// subsequent function to figure out the right common
 	// divisor, it avoids loops.
-	sort.Slice(setCounts, func(i, j int) bool {
-		return setCounts[i] < setCounts[j]
-	})
+	slices.Sort(setCounts)
 
 	return setCounts
 }
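The `slices` package (Go 1.21+) replaces both the `sort.Slice` call above and, in a later decommission hunk, a manual membership loop with `slices.Contains`. A minimal sketch of both helpers:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	counts := []uint64{4, 2, 8}

	// slices.Sort uses the natural ordering, so no
	// hand-written less function is needed.
	slices.Sort(counts)

	// slices.Contains replaces the for/if/return-true loop
	// seen later in isBucketDecommissioned.
	fmt.Println(counts, slices.Contains(counts, 8))
}
```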
@@ -55,7 +55,6 @@ func TestCreateServerEndpoints(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			srvCtxt := serverCtxt{}
 			err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt)
@@ -85,7 +84,6 @@ func TestGetDivisibleSize(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			gotGCD := getDivisibleSize(testCase.totalSizes)
 			if testCase.result != gotGCD {
@@ -172,7 +170,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
 			for i, arg := range testCase.args {
@@ -294,7 +291,6 @@ func TestGetSetIndexes(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			argPatterns := make([]ellipses.ArgPattern, len(testCase.args))
 			for i, arg := range testCase.args {
@@ -637,7 +633,6 @@ func TestParseEndpointSet(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			gotEs, err := parseEndpointSet(0, testCase.arg)
 			if err != nil && testCase.success {

@@ -312,7 +312,6 @@ func TestCreateEndpoints(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		i := i
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			var srvCtxt serverCtxt
@@ -136,10 +136,7 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64
 	shardSize := e.ShardSize()
 	shardFileSize := e.ShardFileSize(totalLength)
 	endShard := (startOffset + length) / e.blockSize
-	tillOffset := endShard*shardSize + shardSize
-	if tillOffset > shardFileSize {
-		tillOffset = shardFileSize
-	}
+	tillOffset := min(endShard*shardSize+shardSize, shardFileSize)
 	return tillOffset
 }
 
@@ -30,7 +30,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
 	var mu sync.Mutex
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	for _, i := range r.Perm(len(disks)) {
-		i := i
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
 	buf := &bytes.Buffer{}
 
 	// Verify erasure.Decode() for random offsets and lengths.
-	for i := 0; i < iterations; i++ {
+	for range iterations {
 		offset := r.Int63n(length)
 		readLen := r.Int63n(length - offset)
 
@@ -308,17 +308,16 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
 		b.Fatalf("failed to create erasure test file: %v", err)
 	}
 
-	for i := 0; i < dataDown; i++ {
+	for i := range dataDown {
 		writers[i] = nil
 	}
 	for i := data; i < data+parityDown; i++ {
 		writers[i] = nil
 	}
 
-	b.ResetTimer()
 	b.SetBytes(size)
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		bitrotReaders := make([]io.ReaderAt, len(disks))
 		for index, disk := range disks {
 			if writers[index] == nil {
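`testing.B.Loop` (Go 1.24) replaces the `for i := 0; i < b.N; i++` idiom; it also resets the benchmark timer on its first call, which appears to be why the `b.ResetTimer()` call is dropped in the hunk above. A sketch with a hypothetical workload:

```go
package bench

import "testing"

// work is a hypothetical workload under test.
func work() int {
	s := 0
	for i := range 100 {
		s += i
	}
	return s
}

func BenchmarkWork(b *testing.B) {
	b.ReportAllocs()
	// b.Loop handles the iteration count and timer bookkeeping,
	// so no explicit b.N loop or b.ResetTimer is needed.
	for b.Loop() {
		_ = work()
	}
}
```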
@@ -172,17 +172,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
 	buffer := make([]byte, blockSizeV2, 2*blockSizeV2)
 	content := make([]byte, size)
 
-	for i := 0; i < dataDown; i++ {
+	for i := range dataDown {
 		disks[i] = OfflineDisk
 	}
 	for i := data; i < data+parityDown; i++ {
 		disks[i] = OfflineDisk
 	}
 
-	b.ResetTimer()
 	b.SetBytes(size)
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		writers := make([]io.Writer, len(disks))
 		for i, disk := range disks {
 			if disk == OfflineDisk {
@@ -102,7 +102,7 @@ func TestErasureHeal(t *testing.T) {
 		// setup stale disks for the test case
 		staleDisks := make([]StorageAPI, len(disks))
 		copy(staleDisks, disks)
-		for j := 0; j < len(staleDisks); j++ {
+		for j := range staleDisks {
 			if j < test.offDisks {
 				readers[j] = nil
 			} else {
@@ -175,7 +175,7 @@ func TestListOnlineDisks(t *testing.T) {
 	fourNanoSecs := time.Unix(4, 0).UTC()
 	modTimesThreeNone := make([]time.Time, 16)
 	modTimesThreeFour := make([]time.Time, 16)
-	for i := 0; i < 16; i++ {
+	for i := range 16 {
 		// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
 		// to be tampered with.
 		if i > 12 {
@@ -244,7 +244,6 @@ func TestListOnlineDisks(t *testing.T) {
 	}
 
 	for i, test := range testCases {
-		test := test
 		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
 			_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
 			if err != nil {
@@ -350,7 +349,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 	fourNanoSecs := time.Unix(4, 0).UTC()
 	modTimesThreeNone := make([]time.Time, 16)
 	modTimesThreeFour := make([]time.Time, 16)
-	for i := 0; i < 16; i++ {
+	for i := range 16 {
 		// Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one
 		// to be tampered with.
 		if i > 12 {
@@ -419,7 +418,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 	}
 
 	for i, test := range testCases {
-		test := test
 		t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
 			_, err := obj.PutObject(ctx, bucket, object,
 				mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
@@ -753,7 +751,7 @@ func TestCommonParities(t *testing.T) {
 	}
 	for idx, test := range tests {
 		var metaArr []FileInfo
-		for i := 0; i < 12; i++ {
+		for i := range 12 {
 			fi := test.fi1
 			if i%2 == 0 {
 				fi = test.fi2
@@ -116,7 +116,6 @@ func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string,
 func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
 	g := errgroup.WithNErrs(len(storageDisks))
 	for index := range storageDisks {
-		index := index
 		g.Go(func() error {
 			if storageDisks[index] == nil {
 				// we ignore disk not found errors
@@ -296,7 +296,6 @@ func TestIsObjectDangling(t *testing.T) {
 		// Add new cases as seen
 	}
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run(testCase.name, func(t *testing.T) {
 			gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs)
 			if !gotMeta.Equals(testCase.expectedMeta) {
@@ -204,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
 	g := errgroup.WithNErrs(len(disks))
 	// Read `xl.meta` in parallel across disks.
 	for index := range disks {
-		index := index
 		g.Go(func() (err error) {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -55,7 +55,7 @@ func TestDiskCount(t *testing.T) {
 // of errors into a single maximal error with in the list.
 func TestReduceErrs(t *testing.T) {
 	canceledErrs := make([]error, 0, 5)
-	for i := 0; i < 5; i++ {
+	for i := range 5 {
 		canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled))
 	}
 	// List all of all test cases to validate various cases of reduce errors.
@@ -222,7 +222,7 @@ func Test_hashOrder(t *testing.T) {
 	var tmp [16]byte
 	rng.Read(tmp[:])
 	prefix := hex.EncodeToString(tmp[:])
-	for i := 0; i < 10000; i++ {
+	for range 10000 {
 		rng.Read(tmp[:])
 
 		y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x)
@@ -408,7 +408,6 @@ func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbuc
 
 	// Start writing `xl.meta` to all disks in parallel.
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -189,7 +189,7 @@ func TestFindFileInfoInQuorum(t *testing.T) {
 	commonNumVersions := 2
 	numVersionsInQuorum := make([]int, 16)
 	numVersionsNoQuorum := make([]int, 16)
-	for i := 0; i < 16; i++ {
+	for i := range 16 {
 		if i < 4 {
 			continue
 		}
@@ -269,7 +269,6 @@ func TestFindFileInfoInQuorum(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		test := test
 		t.Run("", func(t *testing.T) {
 			fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum)
 			_, ok1 := err.(InsufficientReadQuorum)
@@ -316,7 +315,7 @@ func TestTransitionInfoEquals(t *testing.T) {
 	}
 
 	var i uint
-	for i = 0; i < 8; i++ {
+	for i = range uint(8) {
 		fi := FileInfo{
 			TransitionTier:      inputs[0].tier,
 			TransitionedObjName: inputs[0].remoteObjName,
@@ -322,7 +322,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 			uploads = append(uploads, MultipartInfo{
 				Bucket:    bucket,
 				Object:    object,
-				UploadID:  base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))),
+				UploadID:  base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadID)),
 				Initiated: startTime,
 			})
 			populatedUploadIDs.Add(uploadID)
@@ -393,6 +393,12 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 		if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
 			return nil, err
 		}
+
+		// if object doesn't exist return error for If-Match conditional requests
+		// If-None-Match should be allowed to proceed for non-existent objects
+		if err != nil && opts.HasIfMatch && (isErrObjectNotFound(err) || isErrVersionNotFound(err)) {
+			return nil, err
+		}
 	}
 
 	userDefined := cloneMSS(opts.UserDefined)
@@ -498,7 +504,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 		partsMetadata[index].Metadata = userDefined
 	}
 	uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano())
-	uploadID := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadUUID)))
+	uploadID := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadUUID))
 	uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID)
 
 	// Write updated `xl.meta` to all disks.
@@ -540,7 +546,6 @@ func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, src
 
 	// Rename file on all underlying storage disks.
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -820,7 +825,6 @@ func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI
 	objectParts := make([][]string, len(onlineDisks))
 	// List uploaded parts from drives.
 	for index := range onlineDisks {
-		index := index
 		g.Go(func() (err error) {
 			if onlineDisks[index] == nil {
 				return errDiskNotFound
@@ -995,7 +999,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP
 	objectPartInfos := make([][]*ObjectPartInfo, len(disks))
 	// Rename file on all underlying storage disks.
 	for index := range disks {
-		index := index
 		g.Go(func() (err error) {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -1114,6 +1117,12 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
 			return ObjectInfo{}, err
 		}
+
+		// if object doesn't exist return error for If-Match conditional requests
+		// If-None-Match should be allowed to proceed for non-existent objects
+		if err != nil && opts.HasIfMatch && (isErrObjectNotFound(err) || isErrVersionNotFound(err)) {
+			return ObjectInfo{}, err
+		}
 	}
 
 	fi, partsMetadata, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, true)
@@ -1161,6 +1170,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 				Err: fmt.Errorf("checksum type mismatch. got %q (%s) expected %q (%s)", checksumType.String(), checksumType.ObjType(), opts.WantChecksum.Type.String(), opts.WantChecksum.Type.ObjType()),
 			}
 		}
+		checksumType |= hash.ChecksumMultipart | hash.ChecksumIncludesMultipart
 	}
 
 	var checksumCombined []byte
@@ -1509,17 +1519,10 @@ func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, objec
 		auditObjectErasureSet(ctx, "AbortMultipartUpload", object, &er)
 	}
 
-	// Validates if upload ID exists.
-	if _, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false); err != nil {
-		if errors.Is(err, errVolumeNotFound) {
-			return toObjectErr(err, bucket)
-		}
-		return toObjectErr(err, bucket, object, uploadID)
-	}
-
 	// Cleanup all uploaded parts.
-	er.deleteAll(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID))
+	defer er.deleteAll(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID))
 
-	// Successfully purged.
-	return nil
+	// Validates if upload ID exists.
+	_, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false)
+	return toObjectErr(err, bucket, object, uploadID)
 }
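`fmt.Appendf(nil, ...)` formats straight into a byte slice, avoiding the intermediate string that `[]byte(fmt.Sprintf(...))` allocates; that is the upload-ID rewrite above. A sketch with hypothetical values:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	deployID, uploadUUID := "deploy-1", "uuid-1" // hypothetical values

	// Before: Sprintf builds a string, then []byte copies it.
	old := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", deployID, uploadUUID)))

	// After: Appendf writes the formatted bytes directly into
	// a (here freshly allocated) byte slice.
	updated := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", deployID, uploadUUID))

	fmt.Println(old == updated) // true
}
```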
@@ -24,6 +24,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"path"
 	"runtime"
@@ -542,7 +543,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
 	disks := er.getDisks()
 	g := errgroup.WithNErrs(len(disks))
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -575,7 +575,6 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
 	rawFileInfos := make([]RawFileInfo, len(disks))
 	g := errgroup.WithNErrs(len(disks))
 	for index := range disks {
-		index := index
 		g.Go(func() (err error) {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -828,6 +827,13 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 		minDisks = er.setDriveCount - er.defaultParityCount
 	}
 
+	if minDisks == er.setDriveCount/2 {
+		// when data and parity are same we must atleast
+		// wait for response from 1 extra drive to avoid
+		// split-brain.
+		minDisks++
+	}
+
 	calcQuorum := func(metaArr []FileInfo, errs []error) (FileInfo, []FileInfo, []StorageAPI, time.Time, string, error) {
 		readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount)
 		if err != nil {
@@ -1022,7 +1028,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str
 	dataDirs := make([]string, len(disks))
 	// Rename file on all underlying storage disks.
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -1272,6 +1277,12 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
 			return objInfo, err
 		}
+
+		// if object doesn't exist return error for If-Match conditional requests
+		// If-None-Match should be allowed to proceed for non-existent objects
+		if err != nil && opts.HasIfMatch && (isErrObjectNotFound(err) || isErrVersionNotFound(err)) {
+			return objInfo, err
+		}
 	}
 
 	// Validate input data size and it can never be less than -1.
@@ -1624,7 +1635,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
 
 	g := errgroup.WithNErrs(len(disks))
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			if disks[index] == nil {
 				return errDiskNotFound
@@ -1829,7 +1839,6 @@ func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object
 	}
 	g := errgroup.WithNErrs(len(onlineDisks))
 	for index := range onlineDisks {
-		index := index
 		g.Go(func() error {
 			if onlineDisks[index] == nil {
 				return nil
@@ -1855,7 +1864,6 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string
 
 	g := errgroup.WithNErrs(len(disks))
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			if disks[index] == nil {
 				return nil
@@ -2215,9 +2223,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
 			return ObjectInfo{}, err
 		}
 	}
-	for k, v := range objInfo.UserDefined {
-		fi.Metadata[k] = v
-	}
+	maps.Copy(fi.Metadata, objInfo.UserDefined)
 	fi.ModTime = opts.MTime
 	fi.VersionID = opts.VersionID
 
@@ -2287,9 +2293,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 
 	fi.Metadata[xhttp.AmzObjectTagging] = tags
 	fi.ReplicationState = opts.PutReplicationState()
-	for k, v := range opts.UserDefined {
-		fi.Metadata[k] = v
-	}
+	maps.Copy(fi.Metadata, opts.UserDefined)
 
 	if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
@@ -2307,7 +2311,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o
 
 	// Start writing `xl.meta` to all disks in parallel.
 	for index := range onlineDisks {
-		index := index
 		g.Go(func() error {
 			if onlineDisks[index] == nil {
 				return errDiskNotFound
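The `minDisks++` hunk above addresses a quorum tie: when data and parity shard counts are equal, exactly half the drives is not a majority, so two disjoint halves of the set could each answer a read independently (split-brain). Waiting for one extra response forces a strict majority. A minimal sketch of the arithmetic, with hypothetical counts:

```go
package main

import "fmt"

func main() {
	// Hypothetical EC:2 set of 4 drives, so data == parity.
	setDriveCount, defaultParityCount := 4, 2
	minDisks := setDriveCount - defaultParityCount // 2 == setDriveCount/2

	// Two responses out of four is not a majority: drives {1,2}
	// and drives {3,4} could each satisfy the read separately,
	// so one extra response is required.
	if minDisks == setDriveCount/2 {
		minDisks++
	}
	fmt.Println(minDisks) // 3 of 4: a strict majority
}
```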
@@ -112,7 +112,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
 		t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
 	}
 	for _, test := range testCases {
-		test := test
 		t.Run("", func(t *testing.T) {
 			_, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{})
 			if err != nil {
@@ -625,7 +624,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	for f := 0; f < 2; f++ {
+	for f := range 2 {
 		diskErrors := make(map[int]error)
 		for i := 0; i <= f; i++ {
 			diskErrors[i] = nil
@@ -774,7 +773,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
 	// in a 16 disk Erasure setup. The original disks are 'replaced' with
 	// naughtyDisks that fail after 'f' successful StorageAPI method
 	// invocations, where f - [0,4)
-	for f := 0; f < 2; f++ {
+	for f := range 2 {
 		diskErrors := make(map[int]error)
 		for i := 0; i <= f; i++ {
 			diskErrors[i] = nil
@@ -837,7 +836,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
 	// in a 16 disk Erasure setup. The original disks are 'replaced' with
 	// naughtyDisks that fail after 'f' successful StorageAPI method
 	// invocations, where f - [0,2)
-	for f := 0; f < 2; f++ {
+	for f := range 2 {
 		t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) {
 			diskErrors := make(map[int]error)
 			for i := 0; i <= f; i++ {
@@ -1109,7 +1108,6 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		{parts7, errs7, 11, 11, parts7SC, nil},
 	}
 	for _, tt := range tests {
-		tt := tt
 		t.(*testing.T).Run("", func(t *testing.T) {
 			globalStorageClass.Update(tt.storageClassCfg)
 			actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks)))
@@ -25,6 +25,7 @@ import (
 	"io"
 	"math/rand"
 	"net/http"
+	"slices"
 	"sort"
 	"strings"
 	"time"
@@ -117,12 +118,7 @@ func (pd *PoolDecommissionInfo) bucketPop(bucket string) bool {
 }
 
 func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool {
-	for _, b := range pd.DecommissionedBuckets {
-		if b == bucket {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(pd.DecommissionedBuckets, bucket)
 }
 
 func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) {
@@ -572,6 +568,7 @@ func newPoolMeta(z *erasureServerPools, prevMeta poolMeta) poolMeta {
 		for _, currentPool := range prevMeta.Pools {
 			// Preserve any current pool status.
 			if currentPool.CmdLine == pool.endpoints.CmdLine {
+				currentPool.ID = idx
 				newMeta.Pools = append(newMeta.Pools, currentPool)
 				skip = true
 				break
@@ -792,8 +789,6 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 	}
 
 	for setIdx, set := range pool.sets {
-		set := set
-
 		filterLifecycle := func(bucket, object string, fi FileInfo) bool {
 			if lc == nil {
 				return false
@@ -901,7 +896,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 			}
 
 			// gr.Close() is ensured by decommissionObject().
-			for try := 0; try < 3; try++ {
+			for range 3 {
 				if version.IsRemote() {
 					if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{
 						VersionID: versionID,
@@ -176,7 +176,6 @@ func TestPoolMetaValidate(t *testing.T) {
 
 	t.Parallel()
 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run(testCase.name, func(t *testing.T) {
 			update, err := testCase.meta.validate(testCase.pools)
 			if testCase.expectedErr {
@@ -98,12 +98,10 @@ type rebalanceInfo struct {
 
 // rebalanceMeta contains information pertaining to an ongoing rebalance operation.
 type rebalanceMeta struct {
-	cancel          context.CancelFunc `msg:"-"` // to be invoked on rebalance-stop
-	lastRefreshedAt time.Time          `msg:"-"`
-	StoppedAt       time.Time          `msg:"stopTs"` // Time when rebalance-stop was issued.
-	ID              string             `msg:"id"`     // ID of the ongoing rebalance operation
-	PercentFreeGoal float64            `msg:"pf"`     // Computed from total free space and capacity at the start of rebalance
-	PoolStats       []*rebalanceStats  `msg:"rss"`    // Per-pool rebalance stats keyed by pool index
+	StoppedAt       time.Time         `msg:"stopTs"` // Time when rebalance-stop was issued.
+	ID              string            `msg:"id"`     // ID of the ongoing rebalance operation
+	PercentFreeGoal float64           `msg:"pf"`     // Computed from total free space and capacity at the start of rebalance
+	PoolStats       []*rebalanceStats `msg:"rss"`    // Per-pool rebalance stats keyed by pool index
 }
 
 var errRebalanceNotStarted = errors.New("rebalance not started")
@@ -313,8 +311,6 @@ func (r *rebalanceMeta) loadWithOpts(ctx context.Context, store objectIO, opts O
 		return err
 	}
 
-	r.lastRefreshedAt = time.Now()
-
 	return nil
 }
 
@@ -341,7 +337,8 @@ func (r *rebalanceMeta) save(ctx context.Context, store objectIO) error {
 	return r.saveWithOpts(ctx, store, ObjectOptions{})
 }
 
-func (z *erasureServerPools) IsRebalanceStarted() bool {
+func (z *erasureServerPools) IsRebalanceStarted(ctx context.Context) bool {
+	_ = z.loadRebalanceMeta(ctx)
 	z.rebalMu.RLock()
 	defer z.rebalMu.RUnlock()
 
@@ -394,12 +391,14 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
 	var (
 		quit     bool
 		traceMsg string
+		notify   bool // if status changed, notify nodes to reload rebalance metadata
 	)
 
 	for {
 		select {
 		case rebalErr := <-doneCh:
 			quit = true
+			notify = true
 			now := time.Now()
 			var status rebalStatus
 
@@ -421,12 +420,16 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int)
 			z.rebalMu.Unlock()
 
 		case <-timer.C:
+			notify = false
 			traceMsg = fmt.Sprintf("saved at %s", time.Now())
 		}
 
 		stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg)
 		err := z.saveRebalanceStats(GlobalContext, poolIdx, rebalSaveStats)
 		stopFn(0, err)
+		if err == nil && notify {
+			globalNotificationSys.LoadRebalanceMeta(GlobalContext, false)
+		}
 		rebalanceLogIf(GlobalContext, err)
 
 		if quit {
@@ -580,8 +583,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
 	}
 
 	for setIdx, set := range pool.sets {
-		set := set
-
 		filterLifecycle := func(bucket, object string, fi FileInfo) bool {
 			if lc == nil {
 				return false
@@ -594,7 +595,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
 				globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal)
 				return true
 			}
-
 			return false
 		}
 
@@ -689,7 +689,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string,
 				continue
 			}
 
-			for try := 0; try < 3; try++ {
+			for range 3 {
 				// GetObjectReader.Close is called by rebalanceObject
 				gr, err := set.GetObjectNInfo(ctx,
 					bucket,
@@ -803,13 +803,20 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int
 	ctx = lkCtx.Context()
 	noLockOpts := ObjectOptions{NoLock: true}
 	r := &rebalanceMeta{}
-	if err := r.loadWithOpts(ctx, z.serverPools[0], noLockOpts); err != nil {
+	err = r.loadWithOpts(ctx, z.serverPools[0], noLockOpts)
+	if err != nil && !errors.Is(err, errConfigNotFound) {
 		return err
 	}
 
 	z.rebalMu.Lock()
 	defer z.rebalMu.Unlock()
 
+	// if not found, we store the memory metadata back
+	// when rebalance status changed, will notify all nodes update status to memory, we can treat the memory metadata is the latest status
+	if errors.Is(err, errConfigNotFound) {
+		r = z.rebalMeta
+	}
+
 	switch opts {
 	case rebalSaveStoppedAt:
 		r.StoppedAt = time.Now()
@@ -933,7 +940,7 @@ func (z *erasureServerPools) StartRebalance() {
 		return
 	}
 	ctx, cancel := context.WithCancel(GlobalContext)
-	z.rebalMeta.cancel = cancel // to be used when rebalance-stop is called
+	z.rebalCancel = cancel // to be used when rebalance-stop is called
 	z.rebalMu.Unlock()
 
 	z.rebalMu.RLock()
@@ -976,10 +983,9 @@ func (z *erasureServerPools) StopRebalance() error {
 		return nil
 	}
 
-	if cancel := r.cancel; cancel != nil {
-		// cancel != nil only on pool leaders
-		r.cancel = nil
+	if cancel := z.rebalCancel; cancel != nil {
 		cancel()
+		z.rebalCancel = nil
 	}
 	return nil
 }
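The rebalance hunks above move `cancel` (and the `lastRefreshedAt` timestamp) off `rebalanceMeta` onto the pool object as `rebalCancel`. One plausible reading: `rebalanceMeta` is serialized, reloaded from the backend (`loadRebalanceMeta` in the new `IsRebalanceStarted`), and replaced wholesale, and a reload would silently drop runtime-only fields such as a `context.CancelFunc`; keeping the cancel handle on the long-lived `erasureServerPools` value avoids that. A sketch of the shape, with hypothetical types:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// Persisted state: safe to marshal, reload, and replace wholesale.
type rebalanceMeta struct {
	ID string
}

// Long-lived owner: runtime-only handles live here, so reloading
// the persisted meta cannot lose the cancel function.
type pools struct {
	mu          sync.RWMutex
	rebalMeta   *rebalanceMeta
	rebalCancel context.CancelFunc
}

func (p *pools) stop() {
	p.mu.Lock()
	defer p.mu.Unlock()
	if cancel := p.rebalCancel; cancel != nil {
		cancel()
		p.rebalCancel = nil
	}
}

func main() {
	p := &pools{rebalMeta: &rebalanceMeta{ID: "r1"}}
	_, cancel := context.WithCancel(context.Background())
	p.rebalCancel = cancel
	p.stop()
	fmt.Println(p.rebalCancel == nil) // true
}
```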
@@ -53,8 +53,9 @@ type erasureServerPools struct {
 	poolMetaMutex sync.RWMutex
 	poolMeta      poolMeta
 
-	rebalMu   sync.RWMutex
-	rebalMeta *rebalanceMeta
+	rebalMu     sync.RWMutex
+	rebalMeta   *rebalanceMeta
+	rebalCancel context.CancelFunc
 
 	deploymentID     [16]byte
 	distributionAlgo string
@@ -420,7 +421,6 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b
 	nSets := make([]int, len(z.serverPools))
 	g := errgroup.WithNErrs(len(z.serverPools))
 	for index := range z.serverPools {
-		index := index
 		// Skip suspended pools or pools participating in rebalance for any new
 		// I/O.
 		if z.IsSuspended(index) || z.IsPoolRebalancing(index) {
@@ -635,15 +635,18 @@ func (z *erasureServerPools) getPoolIdxNoLock(ctx context.Context, bucket, objec
 // if none are found falls back to most available space pool, this function is
 // designed to be only used by PutObject, CopyObject (newObject creation) and NewMultipartUpload.
 func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
-	idx, err = z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
+	pinfo, _, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, ObjectOptions{
 		SkipDecommissioned: true,
 		SkipRebalancing:    true,
 	})
 
 	if err != nil && !isErrObjectNotFound(err) {
-		return idx, err
+		return -1, err
 	}
 
-	if isErrObjectNotFound(err) {
+	idx = pinfo.Index
+	if isErrObjectNotFound(err) || pinfo.Err == nil {
+		// will generate a temp object
 		idx = z.getAvailablePoolIdx(ctx, bucket, object, size)
 		if idx < 0 {
 			return -1, toObjectErr(errDiskFull)
@@ -657,7 +660,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
 	g := errgroup.WithNErrs(len(z.serverPools))
 
 	for index := range z.serverPools {
-		index := index
 		g.Go(func() error {
 			return z.serverPools[index].Shutdown(ctx)
 		}, index)
@@ -709,7 +711,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool)
 	storageInfos := make([]StorageInfo, len(z.serverPools))
 	g := errgroup.WithNErrs(len(z.serverPools))
 	for index := range z.serverPools {
-		index := index
 		g.Go(func() error {
 			storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics)
 			return nil
@@ -1089,6 +1090,10 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 
 	object = encodeDirObject(object)
 	if z.SinglePool() {
+		_, err := z.getPoolIdx(ctx, bucket, object, data.Size())
+		if err != nil {
+			return ObjectInfo{}, err
+		}
 		return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
 	}
 
@@ -1178,6 +1183,13 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob
 		return z.deleteObjectFromAllPools(ctx, bucket, object, opts, noReadQuorumPools)
 	}
 
+	// All replication requests needs to go to pool with the object.
+	if opts.ReplicationRequest {
+		objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts)
+		objInfo.Name = decodeDirObject(object)
+		return objInfo, err
+	}
+
 	for _, pool := range z.serverPools {
 		objInfo, err := pool.DeleteObject(ctx, bucket, object, opts)
 		if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
@@ -1254,7 +1266,6 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
 
 	eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools))
 	for i, pool := range z.serverPools {
-		i := i
-		pool := pool
 		eg.Go(func() error {
 			dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts)
@@ -1816,6 +1827,10 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
 	}
 
 	if z.SinglePool() {
+		_, err := z.getPoolIdx(ctx, bucket, object, data.Size())
+		if err != nil {
+			return PartInfo{}, err
+		}
 		return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 	}
 
@@ -2226,7 +2241,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
 
 	for poolIdx, erasureSet := range z.serverPools {
 		for setIdx, set := range erasureSet.sets {
-			set := set
 			listOut := make(chan metaCacheEntry, 1)
 			entries = append(entries, listOut)
 			disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)
@@ -392,7 +392,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 	var lk sync.Mutex
 	for i := range setCount {
 		lockerEpSet := set.NewStringSet()
-		for j := 0; j < setDriveCount; j++ {
+		for j := range setDriveCount {
 			wg.Add(1)
 			go func(i int, endpoint Endpoint) {
 				defer wg.Done()
@@ -415,7 +415,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 			defer wg.Done()
 
 			var innerWg sync.WaitGroup
-			for j := 0; j < setDriveCount; j++ {
+			for j := range setDriveCount {
 				disk := storageDisks[i*setDriveCount+j]
 				if disk == nil {
 					continue
@@ -593,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo {
 
 	g := errgroup.WithNErrs(len(s.sets))
 	for index := range s.sets {
-		index := index
 		g.Go(func() error {
 			storageInfos[index] = s.sets[index].StorageInfo(ctx)
 			return nil
@@ -618,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag
 
 	g := errgroup.WithNErrs(len(s.sets))
 	for index := range s.sets {
-		index := index
 		g.Go(func() error {
 			storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics)
 			return nil
@@ -641,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
 	g := errgroup.WithNErrs(len(s.sets))
 
 	for index := range s.sets {
-		index := index
 		g.Go(func() error {
 			return s.sets[index].Shutdown(ctx)
 		}, index)
@@ -705,7 +702,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
 func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error {
 	g := errgroup.WithNErrs(len(storageDisks))
 	for index := range storageDisks {
-		index := index
 		g.Go(func() error {
 			if storageDisks[index] == nil {
 				// we ignore disk not found errors
@@ -40,13 +40,12 @@ func BenchmarkCrcHash(b *testing.B) {
 		{1024},
 	}
 	for _, testCase := range cases {
-		testCase := testCase
 		key := randString(testCase.key)
 		b.Run("", func(b *testing.B) {
 			b.SetBytes(1024)
 			b.ReportAllocs()
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
 				crcHashMod(key, 16)
 			}
 		})
@@ -65,13 +64,12 @@ func BenchmarkSipHash(b *testing.B) {
 		{1024},
 	}
 	for _, testCase := range cases {
-		testCase := testCase
 		key := randString(testCase.key)
 		b.Run("", func(b *testing.B) {
 			b.SetBytes(1024)
 			b.ReportAllocs()
 			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
 				sipHashMod(key, 16, testUUID)
 			}
 		})
@@ -164,7 +162,7 @@ func TestNewErasureSets(t *testing.T) {
 
 	nDisks := 16 // Maximum disks.
 	var erasureDisks []string
-	for i := 0; i < nDisks; i++ {
+	for range nDisks {
 		// Do not attempt to create this path, the test validates
 		// so that newErasureSets initializes non existing paths
 		// and successfully returns initialized object layer.
@@ -21,6 +21,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"math/rand"
 	"os"
 	"runtime"
@@ -175,7 +176,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
 
 	g := errgroup.WithNErrs(len(disks))
 	for index := range disks {
-		index := index
 		g.Go(func() error {
 			di := madmin.Disk{
 				Endpoint: endpoints[index].String(),
@@ -219,9 +219,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks
 				di.Metrics.LastMinute[k] = v.asTimedAction()
 			}
 		}
-		for k, v := range info.Metrics.APICalls {
-			di.Metrics.APICalls[k] = v
-		}
+		maps.Copy(di.Metrics.APICalls, info.Metrics.APICalls)
 		if info.Total > 0 {
 			di.Utilization = float64(info.Used / info.Total * 100)
 		}
@@ -287,7 +285,6 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new
 	infos := make([]DiskInfo, len(disks))
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	for _, i := range r.Perm(len(disks)) {
-		i := i
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -99,7 +99,7 @@ func fmtGenMain(ctxt *cli.Context) {
 	format := newFormatErasureV3(setCount, setDriveCount)
 	format.ID = deploymentID
 	for i := range setCount { // for each erasure set
-		for j := 0; j < setDriveCount; j++ {
+		for j := range setDriveCount {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
 			if deploymentID != "" {
@@ -159,7 +159,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
 
 	for i := range numSets {
 		format.Erasure.Sets[i] = make([]string, setLen)
-		for j := 0; j < setLen; j++ {
+		for j := range setLen {
 			format.Erasure.Sets[i][j] = mustGetUUID()
 		}
 	}
@@ -177,7 +177,7 @@ func formatGetBackendErasureVersion(b []byte) (string, error) {
 		return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version)
 	}
 	if meta.Format != formatBackendErasure && meta.Format != formatBackendErasureSingle {
-		return "", fmt.Errorf(`found backend type %s, expected %s or %s - to migrate to a supported backend visit https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html`, meta.Format, formatBackendErasure, formatBackendErasureSingle)
+		return "", fmt.Errorf(`found backend type %s, expected %s or %s - to migrate to a supported backend visit https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-migrate-fs-gateway.html`, meta.Format, formatBackendErasure, formatBackendErasureSingle)
 	}
 	// Erasure backend found, proceed to detect version.
 	format := &formatErasureVersionDetect{}
@@ -324,7 +324,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur
 
 	// Load format from each disk in parallel
 	for index := range storageDisks {
-		index := index
 		g.Go(func() error {
 			if storageDisks[index] == nil {
 				return errDiskNotFound
@@ -530,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format
 
 	// Write `format.json` to all disks.
 	for index := range storageDisks {
-		index := index
 		g.Go(func() error {
 			if formats[index] == nil {
 				return errDiskNotFound
@@ -566,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag
 	storageDisks := make([]StorageAPI, len(endpoints))
 	g := errgroup.WithNErrs(len(endpoints))
 	for index := range endpoints {
-		index := index
 		g.Go(func() (err error) {
 			storageDisks[index], err = newStorageAPI(endpoints[index], opts)
 			return err
@@ -600,7 +597,6 @@ func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool {
 func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error {
 	g := errgroup.WithNErrs(len(formats))
 	for i := range formats {
-		i := i
 		g.Go(func() error {
 			if formats[i] == nil || !endpoints[i].IsLocal {
 				return nil
@@ -641,7 +637,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 
 	for i := range setCount {
 		hostCount := make(map[string]int, setDriveCount)
-		for j := 0; j < setDriveCount; j++ {
+		for j := range setDriveCount {
 			disk := storageDisks[i*setDriveCount+j]
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
@@ -662,7 +658,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 			return
 		}
 		logger.Info(" * Set %v:", i+1)
-		for j := 0; j < setDriveCount; j++ {
+		for j := range setDriveCount {
 			disk := storageDisks[i*setDriveCount+j]
 			logger.Info("   - Drive: %s", disk.String())
 		}
@@ -48,7 +48,7 @@ func TestFixFormatV3(t *testing.T) {
 	format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
 	formats := make([]*formatErasureV3, 8)

-	for j := 0; j < 8; j++ {
+	for j := range 8 {
 		newFormat := format.Clone()
 		newFormat.Erasure.This = format.Erasure.Sets[0][j]
 		formats[j] = newFormat

@@ -79,7 +79,7 @@ func TestFormatErasureEmpty(t *testing.T) {
 	format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
 	formats := make([]*formatErasureV3, 16)

-	for j := 0; j < 16; j++ {
+	for j := range 16 {
 		newFormat := format.Clone()
 		newFormat.Erasure.This = format.Erasure.Sets[0][j]
 		formats[j] = newFormat

@@ -276,8 +276,8 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) {
 	format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
 	formats := make([]*formatErasureV3, 32)

-	for i := 0; i < setCount; i++ {
-		for j := 0; j < setDriveCount; j++ {
+	for i := range setCount {
+		for j := range setDriveCount {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
 			formats[i*setDriveCount+j] = newFormat

@@ -390,18 +390,17 @@ func BenchmarkGetFormatErasureInQuorumOld(b *testing.B) {
 	format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
 	formats := make([]*formatErasureV3, 15*200)

-	for i := 0; i < setCount; i++ {
-		for j := 0; j < setDriveCount; j++ {
+	for i := range setCount {
+		for j := range setDriveCount {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
 			formats[i*setDriveCount+j] = newFormat
 		}
 	}

-	b.ResetTimer()
 	b.ReportAllocs()

-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		_, _ = getFormatErasureInQuorumOld(formats)
 	}
 }

@@ -414,18 +413,17 @@ func BenchmarkGetFormatErasureInQuorum(b *testing.B) {
 	format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1
 	formats := make([]*formatErasureV3, 15*200)

-	for i := 0; i < setCount; i++ {
-		for j := 0; j < setDriveCount; j++ {
+	for i := range setCount {
+		for j := range setDriveCount {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
 			formats[i*setDriveCount+j] = newFormat
 		}
 	}

-	b.ResetTimer()
 	b.ReportAllocs()

-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		_, _ = getFormatErasureInQuorum(formats)
 	}
 }

@@ -440,8 +438,8 @@ func TestNewFormatSets(t *testing.T) {
 	formats := make([]*formatErasureV3, 32)
 	errs := make([]error, 32)

-	for i := 0; i < setCount; i++ {
-		for j := 0; j < setDriveCount; j++ {
+	for i := range setCount {
+		for j := range setDriveCount {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
 			formats[i*setDriveCount+j] = newFormat
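The benchmark hunks swap the classic `for i := 0; i < b.N; i++` counter for `testing.B.Loop` (Go 1.24), which manages iteration count and timing itself — presumably why the adjacent `b.ResetTimer()` calls could be dropped. A minimal sketch; the benchmarked body is a stand-in:

```go
package bench

import (
	"strconv"
	"testing"
)

// BenchmarkItoa uses b.Loop(): it runs the body the right number of
// times, starts/stops the timer around the loop (making a manual
// b.ResetTimer() redundant), and keeps the result from being
// optimized away by the compiler.
func BenchmarkItoa(b *testing.B) {
	b.ReportAllocs()
	for b.Loop() {
		_ = strconv.Itoa(12345)
	}
}
```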
@@ -98,7 +98,7 @@ func (m *minioFileInfo) IsDir() bool {
 	return m.isDir
 }

-func (m *minioFileInfo) Sys() interface{} {
+func (m *minioFileInfo) Sys() any {
 	return nil
 }

@@ -316,7 +316,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error)
 	if err != nil {
 		return nil, err
 	}
	claims := make(map[string]any)
-	claims := make(map[string]interface{})
+	claims := make(map[string]any)
 	claims[expClaim] = UTCNow().Add(expiryDur).Unix()

 	claims[ldapUser] = lookupResult.NormDN
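The `interface{}` → `any` substitutions here (and throughout `cmd/logging.go` further down) are purely cosmetic: `any` has been a predeclared alias for `interface{}` since Go 1.18, so the signatures are type-identical before and after. A small illustration:

```go
package main

import "fmt"

// printAll accepts any values; the variadic `...any` parameter means
// exactly the same thing as the older `...interface{}` spelling.
func printAll(v ...any) {
	for _, x := range v {
		fmt.Println(x)
	}
}

func main() {
	printAll("session-1", 42, true)
}
```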
@@ -33,14 +33,14 @@ var globalRemoteFTPClientTransport = NewRemoteTargetHTTPTransport(true)()
 type minioLogger struct{}

 // Print implement Logger
-func (log *minioLogger) Print(sessionID string, message interface{}) {
+func (log *minioLogger) Print(sessionID string, message any) {
 	if serverDebugLog {
 		fmt.Printf("%s %s\n", sessionID, message)
 	}
 }

 // Printf implement Logger
-func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) {
+func (log *minioLogger) Printf(sessionID string, format string, v ...any) {
 	if serverDebugLog {
 		if sessionID != "" {
 			fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...))
@@ -23,6 +23,7 @@ import (
 	"net/http"
 	"path"
 	"runtime/debug"
+	"slices"
 	"strings"
 	"sync/atomic"
 	"time"

@@ -396,18 +397,16 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler {
 		if k == "delimiter" { // delimiters are allowed to have `.` or `..`
 			continue
 		}
-		for _, v := range vv {
-			if hasBadPathComponent(v) {
-				if ok {
-					tc.FuncName = "handler.ValidRequest"
-					tc.ResponseRecorder.LogErrBody = true
-				}
-
-				defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
-				writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
-				atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
-				return
-			}
-		}
+		if slices.ContainsFunc(vv, hasBadPathComponent) {
+			if ok {
+				tc.FuncName = "handler.ValidRequest"
+				tc.ResponseRecorder.LogErrBody = true
+			}
+
+			defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r))
+			writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
+			atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1)
+			return
 		}
 		if hasMultipleAuth(r) {
|
|||
|
||||
func generateHeader(size, usersize int) http.Header {
|
||||
header := http.Header{}
|
||||
for i := 0; i < size; i++ {
|
||||
for i := range size {
|
||||
header.Set(strconv.Itoa(i), "")
|
||||
}
|
||||
userlength := 0
|
||||
|
@ -136,7 +136,6 @@ var containsReservedMetadataTests = []struct {
|
|||
|
||||
func TestContainsReservedMetadata(t *testing.T) {
|
||||
for _, test := range containsReservedMetadataTests {
|
||||
test := test
|
||||
t.Run("", func(t *testing.T) {
|
||||
contains := containsReservedMetadata(test.header)
|
||||
if contains && !test.shouldFail {
|
||||
|
@ -201,7 +200,7 @@ func Benchmark_hasBadPathComponent(t *testing.B) {
|
|||
t.Run(tt.name, func(b *testing.B) {
|
||||
b.SetBytes(int64(len(tt.input)))
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
if got := hasBadPathComponent(tt.input); got != tt.want {
|
||||
t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want)
|
||||
}
|
||||
|
|
|
@ -292,7 +292,7 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string)
|
|||
return contentEnc
|
||||
}
|
||||
var newEncs []string
|
||||
for _, enc := range strings.Split(contentEnc, ",") {
|
||||
for enc := range strings.SplitSeq(contentEnc, ",") {
|
||||
if enc != streamingContentEncoding {
|
||||
newEncs = append(newEncs, enc)
|
||||
}
|
||||
|
|
|
@ -54,10 +54,7 @@ func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err er
|
|||
|
||||
case h.IsSuffixLength:
|
||||
specifiedLen := -h.Start
|
||||
rangeLength = specifiedLen
|
||||
if specifiedLen > resourceSize {
|
||||
rangeLength = resourceSize
|
||||
}
|
||||
rangeLength = min(specifiedLen, resourceSize)
|
||||
|
||||
case h.Start >= resourceSize:
|
||||
return 0, InvalidRange{
|
||||
|
@ -98,10 +95,7 @@ func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64
|
|||
|
||||
start = h.Start
|
||||
if h.IsSuffixLength {
|
||||
start = resourceSize + h.Start
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
start = max(resourceSize+h.Start, 0)
|
||||
}
|
||||
return start, length, nil
|
||||
}
|
||||
|
|
|
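The HTTP-range hunks replace clamp-style if-blocks with the `min` and `max` builtins (Go 1.21). A worked sketch with illustrative sizes:

```go
package main

import "fmt"

func main() {
	var resourceSize, specifiedLen int64 = 100, 250

	// Suffix range longer than the object: clamp down to the object size.
	rangeLength := min(specifiedLen, resourceSize) // 100

	// Suffix start before byte 0: clamp up to 0.
	start := max(resourceSize-specifiedLen, 0) // 0

	fmt.Println(rangeLength, start)
}
```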
@@ -98,7 +98,7 @@ func (ies *IAMEtcdStore) getUsersSysType() UsersSysType {
 	return ies.usersSysType
 }

-func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error {
+func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item any, itemPath string, opts ...options) error {
 	data, err := json.Marshal(item)
 	if err != nil {
 		return err

@@ -114,7 +114,7 @@ func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, it
 	return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...)
 }

-func getIAMConfig(item interface{}, data []byte, itemPath string) error {
+func getIAMConfig(item any, data []byte, itemPath string) error {
 	data, err := decryptData(data, itemPath)
 	if err != nil {
 		return err

@@ -123,7 +123,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error {
 	return json.Unmarshal(data, item)
 }

-func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error {
+func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item any, path string) error {
 	data, err := readKeyEtcd(ctx, ies.client, path)
 	if err != nil {
 		return err
@@ -22,6 +22,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"maps"
 	"path"
 	"strings"
 	"sync"

@@ -80,7 +81,7 @@ func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType {
 	return iamOS.usersSysType
 }

-func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error {
+func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item any, objPath string, opts ...options) error {
 	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	data, err := json.Marshal(item)
 	if err != nil {

@@ -135,7 +136,7 @@ func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context,
 	return data, meta, nil
 }

-func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error {
+func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item any, objPath string) error {
 	data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath)
 	if err != nil {
 		return err

@@ -294,7 +295,6 @@ func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IA
 	g := errgroup.WithNErrs(len(users))

 	for index := range users {
-		index := index
 		g.Go(func() error {
 			userName := path.Dir(users[index])
 			user, err := iamOS.loadUserIdentity(ctx, userName, userType)

@@ -413,7 +413,6 @@ func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, use
 	g := errgroup.WithNErrs(len(users))

 	for index := range users {
-		index := index
 		g.Go(func() error {
 			userName := strings.TrimSuffix(users[index], ".json")
 			userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup)

@@ -538,7 +537,6 @@ func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, polici
 	g := errgroup.WithNErrs(len(policies))

 	for index := range policies {
-		index := index
 		g.Go(func() error {
 			policyName := path.Dir(policies[index])
 			policyDoc, err := iamOS.loadPolicy(ctx, policyName)

@@ -776,9 +774,7 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
 	}

 	// Copy svcUsersMap to cache.iamUsersMap
-	for k, v := range svcUsersMap {
-		cache.iamUsersMap[k] = v
-	}
+	maps.Copy(cache.iamUsersMap, svcUsersMap)

 	cache.buildUserGroupMemberships()
@@ -23,6 +23,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"path"
 	"sort"
 	"strings"

@@ -159,7 +160,7 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string
 type UserIdentity struct {
 	Version     int              `json:"version"`
 	Credentials auth.Credentials `json:"credentials"`
-	UpdatedAt   time.Time        `json:"updatedAt,omitempty"`
+	UpdatedAt   time.Time        `json:"updatedAt"`
 }

 func newUserIdentity(cred auth.Credentials) UserIdentity {

@@ -171,7 +172,7 @@ type GroupInfo struct {
 	Version   int       `json:"version"`
 	Status    string    `json:"status"`
 	Members   []string  `json:"members"`
-	UpdatedAt time.Time `json:"updatedAt,omitempty"`
+	UpdatedAt time.Time `json:"updatedAt"`
 }

 func newGroupInfo(members []string) GroupInfo {

@@ -182,7 +183,7 @@ func newGroupInfo(members []string) GroupInfo {
 type MappedPolicy struct {
 	Version   int       `json:"version"`
 	Policies  string    `json:"policy"`
-	UpdatedAt time.Time `json:"updatedAt,omitempty"`
+	UpdatedAt time.Time `json:"updatedAt"`
 }

 // mappedPoliciesToMap copies the map of mapped policies to a regular map.

@@ -198,7 +199,7 @@ func mappedPoliciesToMap(m *xsync.MapOf[string, MappedPolicy]) map[string]Mapped
 // converts a mapped policy into a slice of distinct policies
 func (mp MappedPolicy) toSlice() []string {
 	var policies []string
-	for _, policy := range strings.Split(mp.Policies, ",") {
+	for policy := range strings.SplitSeq(mp.Policies, ",") {
 		if strings.TrimSpace(policy) == "" {
 			continue
 		}

@@ -219,8 +220,8 @@ func newMappedPolicy(policy string) MappedPolicy {
 type PolicyDoc struct {
 	Version    int `json:",omitempty"`
 	Policy     policy.Policy
-	CreateDate time.Time `json:",omitempty"`
-	UpdateDate time.Time `json:",omitempty"`
+	CreateDate time.Time
+	UpdateDate time.Time
 }

 func newPolicyDoc(p policy.Policy) PolicyDoc {

@@ -400,7 +401,6 @@ func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool,
 	g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time.

 	for index := range groups {
-		index := index
 		g.Go(func() error {
 			err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap)
 			if err != nil && !errors.Is(err, errNoSuchPolicy) {

@@ -610,8 +610,8 @@ type IAMStorageAPI interface {
 	loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
 	loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error
 	loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error
-	saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error
-	loadIAMConfig(ctx context.Context, item interface{}, path string) error
+	saveIAMConfig(ctx context.Context, item any, path string, opts ...options) error
+	loadIAMConfig(ctx context.Context, item any, path string) error
 	deleteIAMConfig(ctx context.Context, path string) error
 	savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error
 	saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error

@@ -839,7 +839,7 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string,
 		return policies, nil
 	}
 	if store.policy != nil {
-		val, err, _ := store.policy.Do(name, func() (interface{}, error) {
+		val, err, _ := store.policy.Do(name, func() (any, error) {
 			return getPolicies()
 		})
 		if err != nil {

@@ -1614,9 +1614,7 @@ func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Polic
 	}

 	cache := store.lock()
-	for policy, p := range m {
-		cache.iamPolicyDocsMap[policy] = p
-	}
+	maps.Copy(cache.iamPolicyDocsMap, m)
 	store.unlock()

 	for policy, p := range m {

@@ -2909,7 +2907,7 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred
 func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error {
 	groupLoad := env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn

-	newCachePopulate := func() (val interface{}, err error) {
+	newCachePopulate := func() (val any, err error) {
 		newCache := newIamCache()

 		// Check for service account first

@@ -2975,7 +2973,7 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
 	}

 	var (
-		val interface{}
+		val any
 		err error
 	)
 	if store.group != nil {

@@ -3007,30 +3005,20 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
 		return true
 	})

-	for k, v := range newCache.iamGroupsMap {
-		cache.iamGroupsMap[k] = v
-	}
+	maps.Copy(cache.iamGroupsMap, newCache.iamGroupsMap)

-	for k, v := range newCache.iamPolicyDocsMap {
-		cache.iamPolicyDocsMap[k] = v
-	}
+	maps.Copy(cache.iamPolicyDocsMap, newCache.iamPolicyDocsMap)

-	for k, v := range newCache.iamUserGroupMemberships {
-		cache.iamUserGroupMemberships[k] = v
-	}
+	maps.Copy(cache.iamUserGroupMemberships, newCache.iamUserGroupMemberships)

 	newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool {
 		cache.iamUserPolicyMap.Store(k, v)
 		return true
 	})

-	for k, v := range newCache.iamUsersMap {
-		cache.iamUsersMap[k] = v
-	}
+	maps.Copy(cache.iamUsersMap, newCache.iamUsersMap)

-	for k, v := range newCache.iamSTSAccountsMap {
-		cache.iamSTSAccountsMap[k] = v
-	}
+	maps.Copy(cache.iamSTSAccountsMap, newCache.iamSTSAccountsMap)

 	newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool {
 		cache.iamSTSPolicyMap.Store(k, v)
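Dropping `,omitempty` from the `UpdatedAt`, `CreateDate`, and `UpdateDate` tags above is more than tidying: `encoding/json` never treats a struct value as "empty", so `omitempty` has no effect on a `time.Time` field and the tag was dead weight. A small demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type record struct {
	// omitempty is ignored for struct types such as time.Time:
	// the zero time is still marshaled.
	UpdatedAt time.Time `json:"updatedAt,omitempty"`
}

func main() {
	out, _ := json.Marshal(record{})
	fmt.Println(string(out)) // {"updatedAt":"0001-01-01T00:00:00Z"}
}
```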
cmd/iam.go (51 changes)
@@ -24,6 +24,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"math/rand"
 	"path"
 	"sort"

@@ -276,7 +277,9 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	for {
 		if !openidInit {
 			openidConfig, err := openid.LookupConfig(s,
-				NewHTTPTransport(), xhttp.DrainBody, globalSite.Region())
+				xhttp.WithUserAgent(NewHTTPTransport(), func() string {
+					return getUserAgent(getMinioMode())
+				}), xhttp.DrainBody, globalSite.Region())
 			if err != nil {
 				iamLogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err), logger.WarningKind)
 			} else {

@@ -366,14 +369,11 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	sys.rolesMap = make(map[arn.ARN]string)

 	// From OpenID
-	if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil {
-		sys.validateAndAddRolePolicyMappings(ctx, riMap)
-	}
+	maps.Copy(sys.rolesMap, sys.OpenIDConfig.GetRoleInfo())

 	// From AuthN plugin if enabled.
 	if authn := newGlobalAuthNPluginFn(); authn != nil {
-		riMap := authn.GetRoleInfo()
-		sys.validateAndAddRolePolicyMappings(ctx, riMap)
+		maps.Copy(sys.rolesMap, authn.GetRoleInfo())
 	}

 	sys.printIAMRoles()

@@ -501,33 +501,6 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
 	}
 }

-func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
-	// Validate that policies associated with roles are defined. If
-	// authZ plugin is set, role policies are just claims sent to
-	// the plugin and they need not exist.
-	//
-	// If some mapped policies do not exist, we print some error
-	// messages but continue any way - they can be fixed in the
-	// running server by creating the policies after start up.
-	for arn, rolePolicies := range m {
-		specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet()
-		validPolicies, _ := sys.store.MergePolicies(rolePolicies)
-		knownPoliciesSet := newMappedPolicy(validPolicies).policySet()
-		unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet)
-		if len(unknownPoliciesSet) > 0 {
-			authz := newGlobalAuthZPluginFn()
-			if authz == nil {
-				// Print a warning that some policies mapped to a role are not defined.
-				errMsg := fmt.Errorf(
-					"The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.",
-					unknownPoliciesSet.ToSlice(), arn.String())
-				authZLogIf(ctx, errMsg, logger.WarningKind)
-			}
-		}
-		sys.rolesMap[arn] = rolePolicies
-	}
-}
-
 // Prints IAM role ARNs.
 func (sys *IAMSys) printIAMRoles() {
 	if len(sys.rolesMap) == 0 {

@@ -1083,7 +1056,7 @@ type newServiceAccountOpts struct {
 	expiration                 *time.Time
 	allowSiteReplicatorAccount bool // allow creating internal service account for site-replication.

-	claims map[string]interface{}
+	claims map[string]any
 }

 // NewServiceAccount - create a new service account

@@ -1126,7 +1099,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro
 	if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount {
 		return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed
 	}
-	m := make(map[string]interface{})
+	m := make(map[string]any)
 	m[parentClaim] = parentUser

 	if len(policyBuf) > 0 {

@@ -1372,7 +1345,7 @@ func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) (
 }

 // GetClaimsForSvcAcc - gets the claims associated with the service account.
-func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) {
+func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]any, error) {
 	if !sys.Initialized() {
 		return nil, errServerNotInitialized
 	}

@@ -1723,10 +1696,8 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
 		return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...))
 	}

-	for k, v := range updatedKeysMap {
-		// Replace the map values with the updated ones
-		accessKeyMap[k] = v
-	}
+	// Replace the map values with the updated ones
+	maps.Copy(accessKeyMap, updatedKeysMap)

 	return skippedAccessKeys, nil
 }
@@ -19,6 +19,7 @@ package cmd

 import (
 	"errors"
+	"maps"
 	"net/http"
 	"time"

@@ -110,9 +111,7 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b
 		return nil, nil, false, errAuthentication
 	}

-	for k, v := range eclaims {
-		claims.MapClaims[k] = v
-	}
+	maps.Copy(claims.MapClaims, eclaims)

 	// if root access is disabled, disable all its service accounts and temporary credentials.
 	if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() {
@@ -175,7 +175,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
 		fn := authenticateNode
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			fn(creds.AccessKey, creds.SecretKey)
 		}
 	})

@@ -183,7 +183,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
 		fn := newCachedAuthToken()
 		b.ResetTimer()
 		b.ReportAllocs()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
 			fn()
 		}
 	})
@@ -139,7 +139,7 @@ func pickRelevantGoroutines() (gs []string) {
 	// get runtime stack buffer.
 	buf := debug.Stack()
 	// runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" .
-	for _, g := range strings.Split(string(buf), "\n\n") {
+	for g := range strings.SplitSeq(string(buf), "\n\n") {
 		// Again split on a new line, the first line of the second half contains the info about the go routine.
 		sl := strings.SplitN(g, "\n", 2)
 		if len(sl) != 2 {
@@ -329,7 +329,7 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
 		lris, ok := l.lockMap[resource]
 		if !ok {
 			// Just to be safe, delete uuids.
-			for idx := 0; idx < maxDeleteList; idx++ {
+			for idx := range maxDeleteList {
 				mapID := formatUUID(uid, idx)
 				if _, ok := l.lockUID[mapID]; !ok {
 					break
@@ -279,12 +279,12 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) {
 			}
 			t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
 				l := newLocker()
-				for i := 0; i < locks; i++ {
+				for range locks {
 					var tmp [16]byte
 					rng.Read(tmp[:])
 					res := []string{hex.EncodeToString(tmp[:])}

-					for i := 0; i < readers; i++ {
+					for range readers {
 						rng.Read(tmp[:])
 						ok, err := l.RLock(t.Context(), dsync.LockArgs{
 							UID: uuid.NewString(),

@@ -366,12 +366,12 @@ func Test_localLocker_RUnlock(t *testing.T) {
 			}
 			t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) {
 				l := newLocker()
-				for i := 0; i < locks; i++ {
+				for range locks {
 					var tmp [16]byte
 					rng.Read(tmp[:])
 					res := []string{hex.EncodeToString(tmp[:])}

-					for i := 0; i < readers; i++ {
+					for range readers {
 						rng.Read(tmp[:])
 						ok, err := l.RLock(t.Context(), dsync.LockArgs{
 							UID: uuid.NewString(),
cmd/logging.go (104 changes)
@@ -8,211 +8,211 @@ import (
 	"github.com/minio/minio/internal/logger"
 )

-func proxyLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func proxyLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "proxy", err, errKind...)
 }

-func replLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func replLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "replication", err, errKind...)
 }

-func replLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func replLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "replication", err, id, errKind...)
 }

-func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "iam", err, id, errKind...)
 }

-func iamLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func iamLogIf(ctx context.Context, err error, errKind ...any) {
 	if !errors.Is(err, grid.ErrDisconnected) {
 		logger.LogIf(ctx, "iam", err, errKind...)
 	}
 }

-func iamLogEvent(ctx context.Context, msg string, args ...interface{}) {
+func iamLogEvent(ctx context.Context, msg string, args ...any) {
 	logger.Event(ctx, "iam", msg, args...)
 }

-func rebalanceLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func rebalanceLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "rebalance", err, errKind...)
 }

-func rebalanceLogEvent(ctx context.Context, msg string, args ...interface{}) {
+func rebalanceLogEvent(ctx context.Context, msg string, args ...any) {
 	logger.Event(ctx, "rebalance", msg, args...)
 }

-func adminLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func adminLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "admin", err, errKind...)
 }

-func authNLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func authNLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "authN", err, errKind...)
 }

-func authZLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func authZLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "authZ", err, errKind...)
 }

-func peersLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func peersLogIf(ctx context.Context, err error, errKind ...any) {
 	if !errors.Is(err, grid.ErrDisconnected) {
 		logger.LogIf(ctx, "peers", err, errKind...)
 	}
 }

-func peersLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
+func peersLogAlwaysIf(ctx context.Context, err error, errKind ...any) {
 	if !errors.Is(err, grid.ErrDisconnected) {
 		logger.LogAlwaysIf(ctx, "peers", err, errKind...)
 	}
 }

-func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	if !errors.Is(err, grid.ErrDisconnected) {
 		logger.LogOnceIf(ctx, "peers", err, id, errKind...)
 	}
 }

-func bugLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func bugLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "internal", err, errKind...)
 }

-func healingLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func healingLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "healing", err, errKind...)
 }

-func healingLogEvent(ctx context.Context, msg string, args ...interface{}) {
+func healingLogEvent(ctx context.Context, msg string, args ...any) {
 	logger.Event(ctx, "healing", msg, args...)
 }

-func healingLogOnceIf(ctx context.Context, err error, errKind ...interface{}) {
+func healingLogOnceIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "healing", err, errKind...)
 }

-func batchLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func batchLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "batch", err, errKind...)
 }

-func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "batch", err, id, errKind...)
 }

-func bootLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func bootLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "bootstrap", err, errKind...)
 }

-func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...)
 }

-func dnsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func dnsLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "dns", err, errKind...)
 }

-func internalLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func internalLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "internal", err, errKind...)
 }

-func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "internal", err, id, errKind...)
 }

-func transitionLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func transitionLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "transition", err, errKind...)
 }

-func configLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func configLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "config", err, errKind...)
 }

-func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "config", err, id, errKind...)
 }

-func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...)
 }

-func scannerLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func scannerLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "scanner", err, errKind...)
 }

-func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "scanner", err, id, errKind...)
 }

-func ilmLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func ilmLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "ilm", err, errKind...)
 }

-func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "ilm", err, id, errKind...)
 }

-func encLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func encLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "encryption", err, errKind...)
 }

-func encLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func encLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "encryption", err, id, errKind...)
 }

-func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func storageLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "storage", err, errKind...)
 }

-func storageLogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
+func storageLogAlwaysIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogAlwaysIf(ctx, "storage", err, errKind...)
 }

-func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "storage", err, id, errKind...)
 }

-func decomLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func decomLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "decom", err, errKind...)
 }

-func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "decom", err, id, errKind...)
 }

-func decomLogEvent(ctx context.Context, msg string, args ...interface{}) {
+func decomLogEvent(ctx context.Context, msg string, args ...any) {
 	logger.Event(ctx, "decom", msg, args...)
 }

-func etcdLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func etcdLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "etcd", err, errKind...)
 }

-func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "etcd", err, id, errKind...)
 }

-func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func metricsLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "metrics", err, errKind...)
 }

-func s3LogIf(ctx context.Context, err error, errKind ...interface{}) {
+func s3LogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "s3", err, errKind...)
 }

-func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "sftp", err, id, errKind...)
 }

-func shutdownLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func shutdownLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "shutdown", err, errKind...)
 }

-func stsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func stsLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "sts", err, errKind...)
 }

-func tierLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func tierLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "tier", err, errKind...)
 }

-func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
+func kmsLogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "kms", err, errKind...)
 }

@@ -220,11 +220,11 @@ func kmsLogIf(ctx context.Context, err error, errKind ...interface{}) {
 type KMSLogger struct{}

 // LogOnceIf is the implementation of LogOnceIf, accessible using the Logger interface
-func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
 	logger.LogOnceIf(ctx, "kms", err, id, errKind...)
 }

 // LogIf is the implementation of LogIf, accessible using the Logger interface
-func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...interface{}) {
+func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...any) {
 	logger.LogIf(ctx, "kms", err, errKind...)
 }
@@ -20,6 +20,7 @@ package cmd
 import (
 	"context"
 	"errors"
+	"maps"
 	"runtime/debug"
 	"sort"
 	"sync"

@@ -70,7 +71,7 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache {
 	}
 }

-func (b *bucketMetacache) debugf(format string, data ...interface{}) {
+func (b *bucketMetacache) debugf(format string, data ...any) {
 	if serverDebugLog {
 		console.Debugf(format+"\n", data...)
 	}

@@ -195,9 +196,7 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri
 	b.mu.RLock()
 	defer b.mu.RUnlock()
 	dst := make(map[string]metacache, len(b.caches))
-	for k, v := range b.caches {
-		dst[k] = v
-	}
+	maps.Copy(dst, b.caches)
 	// Copy indexes
 	dst2 := make(map[string][]string, len(b.cachesRoot))
 	for k, v := range b.cachesRoot {
@@ -33,7 +33,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
 	for i := range pathNames[:] {
 		pathNames[i] = fmt.Sprintf("prefix/%d", i)
 	}
-	for i := 0; i < elements; i++ {
+	for i := range elements {
 		bm.findCache(listPathOptions{
 			ID:     mustGetUUID(),
 			Bucket: "",

@@ -49,8 +49,8 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) {
 		})
 	}
 	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for i := 0; b.Loop(); i++ {
 		bm.findCache(listPathOptions{
 			ID:     mustGetUUID(),
 			Bucket: "",
@@ -633,7 +633,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) {
 	for testID, tt := range tests {
 		rng := rand.New(rand.NewSource(0))
 		// Run for a number of times, shuffling the input to ensure that output is consistent.
-		for i := 0; i < 10; i++ {
+		for i := range 10 {
 			t.Run(fmt.Sprintf("test-%d-%s-run-%d", testID, tt.name, i), func(t *testing.T) {
 				if i > 0 {
 					rng.Shuffle(len(tt.m), func(i, j int) {
@@ -38,8 +38,8 @@ func (o *listPathOptions) parseMarker() {
 	o.Marker = s[:start]
 	end := strings.LastIndex(s, "]")
 	tag := strings.Trim(s[start:end], "[]")
-	tags := strings.Split(tag, ",")
-	for _, tag := range tags {
+	tags := strings.SplitSeq(tag, ",")
+	for tag := range tags {
 		kv := strings.Split(tag, ":")
 		if len(kv) < 2 {
 			continue
@@ -25,6 +25,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"math/rand"
 	"strconv"
 	"strings"

@@ -162,13 +163,13 @@ func (o listPathOptions) newMetacache() metacache {
 	}
 }

-func (o *listPathOptions) debugf(format string, data ...interface{}) {
+func (o *listPathOptions) debugf(format string, data ...any) {
 	if serverDebugLog {
 		console.Debugf(format+"\n", data...)
 	}
 }

-func (o *listPathOptions) debugln(data ...interface{}) {
+func (o *listPathOptions) debugln(data ...any) {
 	if serverDebugLog {
 		console.Debugln(data...)
 	}

@@ -225,7 +226,10 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache
 			continue
 		}
 		if yes := o.shouldSkip(ctx, entry); yes {
-			results.lastSkippedEntry = entry.name
+			// when we have not enough results, record the skipped entry
+			if o.Limit > 0 && results.len() < o.Limit {
+				results.lastSkippedEntry = entry.name
+			}
 			continue
 		}
 		if o.Limit > 0 && results.len() >= o.Limit {

@@ -903,9 +907,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
 			fi := FileInfo{
 				Metadata: make(map[string]string, len(meta)),
 			}
-			for k, v := range meta {
-				fi.Metadata[k] = v
-			}
+			maps.Copy(fi.Metadata, meta)
 			err := er.updateObjectMetaWithOpts(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks(), UpdateMetadataOpts{NoPersistence: true})
 			if err == nil {
 				break
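The `gatherResults` hunk above also narrows a behavior: the last skipped entry is now remembered only while a positive limit is set and not yet reached. Isolated as a sketch with hypothetical `resultSet`/`o` shapes (the real types live in the metacache code):

```go
package main

import "fmt"

type resultSet struct{ names []string }

func (r *resultSet) len() int { return len(r.names) }

func main() {
	o := struct{ Limit int }{Limit: 2}
	results := resultSet{names: []string{"a", "b"}}
	lastSkipped := ""

	entry := "c"
	// New behavior: remember a skipped entry only while the listing
	// still has room under its limit; once full, skips are not recorded.
	if o.Limit > 0 && results.len() < o.Limit {
		lastSkipped = entry
	}
	fmt.Printf("lastSkipped=%q\n", lastSkipped) // "" -- result set already full
}
```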
@@ -20,6 +20,7 @@ package cmd
 import (
 	"context"
 	"fmt"
+	"maps"
 	"math"
 	"net/http"
 	"runtime"

@@ -431,15 +432,9 @@ func (m *MetricV2) clone() MetricV2 {
 		VariableLabels: make(map[string]string, len(m.VariableLabels)),
 		Histogram:      make(map[string]uint64, len(m.Histogram)),
 	}
-	for k, v := range m.StaticLabels {
-		metric.StaticLabels[k] = v
-	}
-	for k, v := range m.VariableLabels {
-		metric.VariableLabels[k] = v
-	}
-	for k, v := range m.Histogram {
-		metric.Histogram[k] = v
-	}
+	maps.Copy(metric.StaticLabels, m.StaticLabels)
+	maps.Copy(metric.VariableLabels, m.VariableLabels)
+	maps.Copy(metric.Histogram, m.Histogram)
 	return metric
 }

@@ -2492,10 +2487,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 {
 				"endpoint": ep,
 			},
 		}
-		dwntime := currDowntime
-		if health.offlineDuration > currDowntime {
-			dwntime = health.offlineDuration
-		}
+		dwntime := max(health.offlineDuration, currDowntime)
 		downtimeDuration.Value = float64(dwntime / time.Second)
 		ml = append(ml, downtimeDuration)
 	}
@@ -35,7 +35,7 @@ import (

 type promLogger struct{}

-func (p promLogger) Println(v ...interface{}) {
+func (p promLogger) Println(v ...any) {
 	metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v))
 }
@@ -45,7 +45,7 @@ func TestNSLockRace(t *testing.T) {

 	ctx := t.Context()

-	for i := 0; i < 10000; i++ {
+	for i := range 10000 {
 		nsLk := newNSLock(false)

 		// lk1; ref=1
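The `ctx := t.Context()` context line above (and the `l.RLock(t.Context(), ...)` calls in the locker tests earlier) rely on `testing.T.Context` (Go 1.24), which returns a per-test context that is canceled just before the test finishes, removing manual `context.WithCancel` boilerplate. A minimal sketch:

```go
package example

import "testing"

func TestWithContext(t *testing.T) {
	// t.Context() is canceled automatically when the test ends, so
	// context-scoped resources need no extra cleanup code.
	ctx := t.Context()
	select {
	case <-ctx.Done():
		t.Fatal("context should still be live while the test runs")
	default:
		// still running
	}
}
```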
@@ -201,7 +201,6 @@ func TestCheckLocalServerAddr(t *testing.T) {
 	}

 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			err := CheckLocalServerAddr(testCase.serverAddr)
 			switch {

@@ -273,7 +272,6 @@ func TestSameLocalAddrs(t *testing.T) {
 	}

 	for _, testCase := range testCases {
-		testCase := testCase
 		t.Run("", func(t *testing.T) {
 			sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2)
 			if testCase.expectedErr != nil && err == nil {
Some files were not shown because too many files have changed in this diff.