mirror of https://github.com/minio/minio.git
Compare commits
24 Commits
e5f6dcb951
...
b9e138e220
Commits in this range (author and date columns were empty in the source view):

b9e138e220
de234b888c
2718d9a430
a65292cab1
e0c79be251
a6c538c5a1
e1fcaebc77
21409f112d
417c8648f0
e2245a0b12
b4b3d208dd
0a36d41dcd
ea77bcfc98
9f24ca5d66
816666a4c6
2c7fe094d1
9ebe168782
ee2028cde6
ecde75f911
12a6ea89cc
63e102c049
160f8a901b
ef9b03fbf5
1d50cae43d
@ -1,59 +0,0 @@

name: FIPS Build Test

on:
  pull_request:
    branches:
      - master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

permissions:
  contents: read

jobs:
  build:
    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go-version: [1.24.x]
        os: [ubuntu-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Setup dockerfile for build test
        run: |
          GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
          echo Detected go version $GO_VERSION
          cat > Dockerfile.fips.test <<EOF
          FROM golang:${GO_VERSION}
          COPY . /minio
          WORKDIR /minio
          ENV GOEXPERIMENT=boringcrypto
          RUN make
          EOF

      - name: Build
        uses: docker/build-push-action@v3
        with:
          context: .
          file: Dockerfile.fips.test
          push: false
          load: true
          tags: minio/fips-test:latest

      # This should fail if grep returns non-zero exit
      - name: Test binary
        run: |
          docker run --rm minio/fips-test:latest ./minio --version
          docker run --rm -i minio/fips-test:latest /bin/bash -c 'go tool nm ./minio | grep FIPS | grep -q FIPS'
@ -0,0 +1,93 @@

# MinIO Pull Request Guidelines

These guidelines ensure high-quality commits in MinIO’s GitHub repositories, maintaining a clear, valuable commit history for our open-source projects. They apply to all contributors, fostering efficient reviews and robust code.

## Why Pull Requests?

Pull Requests (PRs) drive quality in MinIO’s codebase by:
- Enabling peer review without pair programming.
- Documenting changes for future reference.
- Ensuring commits tell a clear story of development.

**A poor commit lasts forever, even if code is refactored.**

## Crafting a Quality PR

A strong MinIO PR:
- Delivers a complete, valuable change (feature, bug fix, or improvement).
- Has a concise title (e.g., `[S3] Fix bucket policy parsing #1234`) and a summary with context, referencing issues (e.g., `#1234`).
- Contains well-written, logical commits explaining *why* changes were made (e.g., “Add S3 bucket tagging support so that users can organize resources efficiently”).
- Is small, focused, and easy to review—ideally one commit, unless multiple commits better narrate complex work.
- Adheres to MinIO’s coding standards (e.g., Go style, error handling, testing).

PRs must flow smoothly through review to reach production. Large PRs should be split into smaller, manageable ones.

## Submitting PRs

1. **Title and Summary**:
   - Use a scannable title: `[Subsystem] Action Description #Issue` (e.g., `[IAM] Add role-based access control #567`).
   - Include context in the summary: what changed, why, and any issue references.
   - Use `[WIP]` in the title, or open a GitHub draft PR, for in-progress work to avoid premature merging.

2. **Commits**:
   - Write clear messages: what changed and why (e.g., “Refactor S3 API handler to reduce latency so that requests process 20% faster”).
   - Rebase to tidy commits before submitting (e.g., `git rebase -i main` to squash typos or reword messages), unless multiple contributors worked on the branch.
   - Keep PRs focused—one feature or fix. Split large changes into multiple PRs.

3. **Testing**:
   - Include unit tests for new functionality or bug fixes.
   - Ensure existing tests pass (`make test`).
   - Document testing steps in the PR summary if manual testing was performed.

4. **Before Submitting**:
   - Run `make verify` to check formatting, linting, and tests.
   - Reference related issues (e.g., “Closes #1234”).
   - Notify team members via GitHub `@mentions` if urgent or complex.

## Reviewing PRs

Reviewers ensure MinIO’s commit history remains a clear, reliable record. Responsibilities include:

1. **Commit Quality**:
   - Verify each commit explains *why* the change was made (e.g., “So that…”).
   - Request rebasing if commits are unclear, redundant, or lack context (e.g., “Please squash typo fixes into the parent commit”).

2. **Code Quality**:
   - Check adherence to MinIO’s Go standards (e.g., error handling, documentation).
   - Ensure tests cover new code and pass CI.
   - Flag bugs or critical issues for immediate fixes; suggest non-blocking improvements as follow-up issues.

3. **Flow**:
   - Review promptly to avoid blocking progress.
   - Balance quality and speed; minor issues can be addressed later as follow-up issues rather than blocking the PR.
   - If unable to complete the review, tag another reviewer (e.g., `@username please take over`).

4. **Shared Responsibility**:
   - All MinIO contributors are reviewers. The first commenter on a PR owns the review unless they delegate.
   - Multiple reviewers are encouraged for complex PRs.

5. **No Self-Edits**:
   - Don’t modify the PR directly (e.g., fixing bugs). Request changes from the submitter or create a follow-up PR.
   - If you edit the PR yourself, you become a collaborator rather than a reviewer, and cannot merge it.

6. **Testing**:
   - Assume the submitter tested the code. If testing is unclear, ask for details (e.g., “How was this tested?”).
   - Reject untested PRs unless testing is infeasible; in that case, assist with the test setup.

## Tips for Success

- **Small PRs**: Easier to review, faster to merge. Split large changes logically.
- **Clear Commits**: Use `git rebase -i` to refine history before submitting.
- **Engage Early**: Discuss complex changes in issues or Slack (https://slack.min.io) before coding.
- **Be Responsive**: Address reviewer feedback promptly to keep PRs moving.
- **Learn from Reviews**: Use feedback to improve future contributions.

## Resources

- [MinIO Coding Standards](https://github.com/minio/minio/blob/master/CONTRIBUTING.md)
- [Effective Commit Messages](https://mislav.net/2014/02/hidden-documentation/)
- [GitHub PR Tips](https://github.com/blog/1943-how-to-write-the-perfect-pull-request)

By following these guidelines, we ensure MinIO’s codebase remains high-quality, maintainable, and a joy to contribute to. Happy coding!
@ -1,7 +0,0 @@

# MinIO FIPS Builds

MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring).

MinIO FIPS executables are available at <http://dl.min.io> - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories.

We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes.
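The removed workflow above verifies such a build externally with `go tool nm ./minio | grep FIPS`. As a complementary, hedged sketch (not part of MinIO), the same check can be done from Go itself: when a binary is built with `GOEXPERIMENT=boringcrypto`, the standard library's `crypto/boring` package reports whether the BoringCrypto module is actually in use. The package is only available under that experiment, which is why this sketch carries a `boringcrypto` build tag.

```go
//go:build boringcrypto

// fipscheck is an illustrative sketch: it exits non-zero unless the
// running binary was built with the Go+BoringCrypto toolchain.
package main

import (
	"crypto/boring"
	"fmt"
	"os"
)

func main() {
	if !boring.Enabled() {
		fmt.Println("BoringCrypto is NOT in use")
		os.Exit(1)
	}
	fmt.Println("BoringCrypto is in use")
}
```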
README.md (33 changes)
@ -4,7 +4,13 @@

[](https://min.io)

MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. To learn more about what MinIO is doing for AI storage, go to [AI storage documentation](https://min.io/solutions/object-storage-for-ai).
MinIO is a high-performance, S3-compatible object storage solution released under the GNU AGPL v3.0 license. Designed for speed and scalability, it powers AI/ML, analytics, and data-intensive workloads with industry-leading performance.

🔹 S3 API Compatible – Seamless integration with existing S3 tools
🔹 Built for AI & Analytics – Optimized for large-scale data pipelines
🔹 High Performance – Ideal for demanding storage workloads.

AI storage documentation (https://min.io/solutions/object-storage-for-ai).

This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md).

@ -34,7 +40,9 @@ You can also connect using any S3-compatible tool, such as the MinIO Client `mc`

[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers,
see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.
> [!NOTE]
> To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option.
> For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container.

## macOS

@ -51,7 +59,8 @@ brew install minio/stable/minio

minio server /data
```

> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.
> [!NOTE]
> If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead.

```sh
brew uninstall minio

@ -98,7 +107,8 @@ The MinIO deployment starts using default root credentials `minioadmin:minioadmi

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.

## Microsoft Windows

@ -118,7 +128,8 @@ The MinIO deployment starts using default root credentials `minioadmin:minioadmi

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation.

## Install from Source

@ -132,7 +143,8 @@ The MinIO deployment starts using default root credentials `minioadmin:minioadmi

You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see <https://min.io/docs/minio/linux/developers/minio-drivers.html> to view MinIO SDKs for supported languages.

> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.
> [!NOTE]
> Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation.

MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments.

@ -170,7 +182,8 @@ This command gets the active zone(s). Now, apply port rules to the relevant zone

firewall-cmd --zone=public --add-port=9000/tcp --permanent
```

Note that `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect.
> [!NOTE]
> `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect.

```sh
firewall-cmd --reload

@ -199,7 +212,8 @@ service iptables restart

MinIO Server comes with an embedded web based object browser. Point your web browser to <http://127.0.0.1:9000> to ensure your server has started successfully.

> NOTE: MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.
> [!NOTE]
> MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port.

### Things to consider

@ -221,7 +235,8 @@ For example, consider a MinIO deployment behind a proxy `https://minio.example.n

Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all transactions on MinIO are atomic. So upgrading all the servers simultaneously is the recommended way to upgrade MinIO.

> NOTE: requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>
> [!NOTE]
> requires internet access to update directly from <https://dl.min.io>, optionally you can host any mirrors at <https://my-artifactory.example.com/minio/>

- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html)

@ -74,11 +74,11 @@ check_minimum_version() {

assert_is_supported_arch() {
    case "${ARCH}" in
    x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
    x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
        return
        ;;
    *)
        echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
        echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
        exit 1
        ;;
    esac

@ -9,7 +9,7 @@ function _init() {

export CGO_ENABLED=0

## List of architectures and OS to test cross compilation.
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
}

function _build() {

|
|||
iamLogIf(ctx, err)
|
||||
} else if foundGroupDN == nil || !underBaseDN {
|
||||
err = errNoSuchGroup
|
||||
} else {
|
||||
entityName = foundGroupDN.NormDN
|
||||
}
|
||||
entityName = foundGroupDN.NormDN
|
||||
} else {
|
||||
var foundUserDN *xldap.DNSearchResult
|
||||
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
|
||||
iamLogIf(ctx, err)
|
||||
} else if foundUserDN == nil {
|
||||
err = errNoSuchUser
|
||||
} else {
|
||||
entityName = foundUserDN.NormDN
|
||||
}
|
||||
entityName = foundUserDN.NormDN
|
||||
}
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"mime"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -168,6 +169,32 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
|
|||
if !stringsHasPrefixFold(k, userMetadataPrefix) {
|
||||
continue
|
||||
}
|
||||
// check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html
|
||||
// For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich
|
||||
// funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For
|
||||
// example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead
|
||||
// of =?UTF-8?B?w7Y=?=. This mirrors similar mis-encodings observed with other non-ASCII strings.
|
||||
//
|
||||
// S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding
|
||||
// (quoted-printable) for mostly ASCII strings. Long strings are split at word
|
||||
// boundaries to fit RFC 2047’s 75-character limit, ensuring HTTP parser
|
||||
// compatibility.
|
||||
//
|
||||
// However, this splitting increases header size and can introduce errors, unlike Go’s
|
||||
// mime package in MinIO, which correctly encodes strings with fixed B/Q encodings,
|
||||
// avoiding S3’s heuristic-driven issues.
|
||||
//
|
||||
// For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs,
|
||||
// report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API
|
||||
// compatibility.
|
||||
if needsMimeEncoding(v) {
|
||||
// see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325
|
||||
if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
|
||||
v = mime.BEncoding.Encode("UTF-8", v)
|
||||
} else {
|
||||
v = mime.QEncoding.Encode("UTF-8", v)
|
||||
}
|
||||
}
|
||||
w.Header()[strings.ToLower(k)] = []string{v}
|
||||
isSet = true
|
||||
break
|
||||
|
@ -229,3 +256,14 @@ func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo Object
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// needsEncoding reports whether s contains any bytes that need to be encoded.
|
||||
// see mime.needsEncoding
|
||||
func needsMimeEncoding(s string) bool {
|
||||
for _, b := range s {
|
||||
if (b < ' ' || b > '~') && b != '\t' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
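The comment and helper above describe how user metadata is wrapped in RFC 2047 B- or Q-encoding before being written as response headers. As a self-contained sketch (illustrative names, not MinIO code), the standard-library pieces involved are `mime.BEncoding`/`mime.QEncoding` for encoding and `mime.WordDecoder` for reading such values back:

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// encodeMetadataValue mirrors the selection rule above: values containing
// characters that tend to break Q-encoded words get Base64 (B) encoding,
// everything else gets quoted-printable (Q) encoding. Plain ASCII values
// are returned unchanged by the encoder.
func encodeMetadataValue(v string) string {
	if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
		return mime.BEncoding.Encode("UTF-8", v)
	}
	return mime.QEncoding.Encode("UTF-8", v)
}

func main() {
	for _, v := range []string{"ö", "ÄMÄZÕÑ S3", "plain-ascii"} {
		enc := encodeMetadataValue(v)
		dec, err := new(mime.WordDecoder).DecodeHeader(enc)
		fmt.Printf("value=%q encoded=%q decoded=%q err=%v\n", v, enc, dec, err)
	}
}
```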
|
||||
|
|
|
@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
|
|||
// - Force encoding of '~'
|
||||
func s3URLEncode(s string) string {
|
||||
spaceCount, hexCount := 0, 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
for i := range len(s) {
|
||||
c := s[i]
|
||||
if shouldEscape(c) {
|
||||
if c == ' ' {
|
||||
|
@ -70,7 +70,7 @@ func s3URLEncode(s string) string {
|
|||
|
||||
if hexCount == 0 {
|
||||
copy(t, s)
|
||||
for i := 0; i < len(s); i++ {
|
||||
for i := range len(s) {
|
||||
if s[i] == ' ' {
|
||||
t[i] = '+'
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
|
|||
}
|
||||
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
for i := range len(s) {
|
||||
switch c := s[i]; {
|
||||
case c == ' ':
|
||||
t[j] = '+'
|
||||
|
|
|
@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
|
|||
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
|
||||
bgSeq := newBgHealSequence()
|
||||
// Run the background healer
|
||||
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
|
||||
for range globalBackgroundHealRoutine.workers {
|
||||
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
|
||||
}
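Several hunks in this change, including the `s3URLEncode` loops above, this heal-worker loop, and many later loops in the diff, replace classic three-clause index loops with the range-over-int form added in Go 1.22. A minimal sketch of the equivalence (illustrative values only):

```go
package main

import "fmt"

func main() {
	const n = 3

	// Classic form, as in the removed lines.
	for i := 0; i < n; i++ {
		fmt.Println("classic", i)
	}

	// Go 1.22+ range-over-int form, as in the added lines.
	// The body runs with i = 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range", i)
	}

	// When the index itself is unused, it can be dropped entirely.
	for range n {
		fmt.Println("tick")
	}
}
```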
|
||||
|
||||
|
|
|
@ -248,7 +248,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
|
|||
pInfo PartInfo
|
||||
)
|
||||
|
||||
for i := 0; i < partsCount; i++ {
|
||||
for i := range partsCount {
|
||||
gopts := minio.GetObjectOptions{
|
||||
VersionID: srcObjInfo.VersionID,
|
||||
PartNumber: i + 1,
|
||||
|
|
|
@ -1089,6 +1089,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
|
|||
break
|
||||
}
|
||||
|
||||
// check if we have a file
|
||||
if reader == nil {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing"))
|
||||
writeErrorResponse(ctx, w, apiErr, r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if keyName, ok := formValues["Key"]; !ok {
|
||||
apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest)
|
||||
apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing"))
|
||||
|
|
|
@ -38,7 +38,6 @@ import (
|
|||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
|
@ -556,7 +555,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
|
|||
objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader)
|
||||
sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "")
|
||||
crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey)
|
||||
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
|
||||
_, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
|
||||
if err != nil {
|
||||
return output, metabytes, err
|
||||
}
|
||||
|
@ -590,6 +589,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
|
|||
}
|
||||
|
||||
outbuf := bytes.NewBuffer(nil)
|
||||
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
|
||||
_, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20})
|
||||
return outbuf.Bytes(), err
|
||||
}
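The hunks above drop the explicit `CipherSuites: fips.DARECiphers()` restriction from the `sio.Config` used to seal and open bucket metadata, leaving the library's default cipher selection. As a hedged round-trip sketch of the `github.com/minio/sio` calls involved (illustrative key and payload, not MinIO's actual key-derivation path):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/minio/sio"
)

func main() {
	// 32-byte data-encryption key; MinIO derives its keys per object,
	// here a random key stands in for illustration.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}

	plaintext := []byte(`{"example":"bucket metadata"}`)

	// Encrypt with DARE 2.0 and the library's default cipher suites,
	// matching the Config shape used after this change.
	var sealed bytes.Buffer
	if _, err := sio.Encrypt(&sealed, bytes.NewReader(plaintext), sio.Config{
		Key:        key,
		MinVersion: sio.Version20,
	}); err != nil {
		log.Fatal(err)
	}

	var opened bytes.Buffer
	if _, err := sio.Decrypt(&opened, &sealed, sio.Config{
		Key:        key,
		MinVersion: sio.Version20,
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(opened.String())
}
```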
|
||||
|
|
|
@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
|
|||
sys.RUnlock()
|
||||
// send last n console log messages in order filtered by node
|
||||
if cnt > 0 {
|
||||
for i := 0; i < last; i++ {
|
||||
for i := range last {
|
||||
entry := lastN[(cnt+i)%last]
|
||||
if (entry == log.Info{}) {
|
||||
continue
|
||||
|
|
|
@ -332,7 +332,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, drive *xlStorage, c
|
|||
}
|
||||
|
||||
var skipHeal atomic.Bool
|
||||
if globalIsErasure || cache.Info.SkipHealing {
|
||||
if !globalIsErasure || cache.Info.SkipHealing {
|
||||
skipHeal.Store(true)
|
||||
}
|
||||
|
||||
|
|
|
@ -37,7 +37,6 @@ import (
|
|||
"github.com/minio/kms-go/kes"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/etag"
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/hash"
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
|
@ -427,7 +426,7 @@ func newEncryptReader(ctx context.Context, content io.Reader, kind crypto.Type,
|
|||
return nil, crypto.ObjectKey{}, err
|
||||
}
|
||||
|
||||
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()})
|
||||
reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
|
||||
if err != nil {
|
||||
return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey
|
||||
}
|
||||
|
@ -570,7 +569,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte,
|
|||
reader, err := sio.DecryptReader(client, sio.Config{
|
||||
Key: objectEncryptionKey,
|
||||
SequenceNumber: seqNumber,
|
||||
CipherSuites: fips.DARECiphers(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, crypto.ErrInvalidCustomerKey
|
||||
|
@ -1062,7 +1060,7 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn {
|
|||
var buffer bytes.Buffer
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte(baseKey))
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil)}); err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("unable to encrypt using object key"))
|
||||
}
|
||||
return buffer.Bytes()
|
||||
|
@ -1076,8 +1074,16 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
|
|||
return input, nil
|
||||
}
|
||||
var key []byte
|
||||
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
|
||||
key = k[:]
|
||||
if crypto.SSECopy.IsRequested(h) {
|
||||
sseCopyKey, err := crypto.SSECopy.ParseHTTP(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = sseCopyKey[:]
|
||||
} else {
|
||||
if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
|
||||
key = k[:]
|
||||
}
|
||||
}
|
||||
key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined)
|
||||
if err != nil {
|
||||
|
@ -1085,11 +1091,12 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
|
|||
}
|
||||
mac := hmac.New(sha256.New, key)
|
||||
mac.Write([]byte(baseKey))
|
||||
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
|
||||
return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil)})
|
||||
}
|
||||
}
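The change above makes the metadata decrypter prefer the SSE-C copy-source key when one is supplied (i.e., during a server-side copy) and fall back to the regular SSE-C key otherwise. A hedged sketch of that selection rule, reusing the internal `crypto` helpers that appear in the diff (so it only compiles inside this repository; `selectSSECKey` is an illustrative name, not an existing function):

```go
// selectSSECKey is an illustrative helper, not part of MinIO: it returns the
// client-provided key that should decrypt the source object's metadata.
func selectSSECKey(h http.Header) ([]byte, error) {
	if crypto.SSECopy.IsRequested(h) {
		// Server-side copy: the copy-source SSE-C headers carry the key
		// protecting the source object.
		k, err := crypto.SSECopy.ParseHTTP(h)
		if err != nil {
			return nil, err
		}
		return k[:], nil
	}
	if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
		return k[:], nil
	}
	// No SSE-C key supplied; the caller proceeds with its non-SSE-C handling.
	return nil, nil
}
```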
|
||||
|
||||
// decryptPartsChecksums will attempt to decode checksums and return it/them if set.
|
||||
// decryptPartsChecksums will attempt to decrypt and decode part checksums, and save
|
||||
// only the decrypted part checksum values on ObjectInfo directly.
|
||||
// if part > 0, and we have the checksum for the part that will be returned.
|
||||
func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
|
||||
data := o.Checksum
|
||||
|
@ -1114,6 +1121,23 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
|
|||
}
|
||||
}
|
||||
|
||||
// decryptChecksum will attempt to decrypt the ObjectInfo.Checksum, returns the decrypted value
|
||||
// An error is only returned if it was encrypted and the decryption failed.
|
||||
func (o *ObjectInfo) decryptChecksum(h http.Header) ([]byte, error) {
|
||||
data := o.Checksum
|
||||
if len(data) == 0 {
|
||||
return data, nil
|
||||
}
|
||||
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
|
||||
decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data = decrypted
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// metadataEncryptFn provides an encryption function for metadata.
|
||||
// Will return nil, nil if unencrypted.
|
||||
func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn, error) {
|
||||
|
|
|
@ -1481,7 +1481,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
|
|||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
// Object info is the same in all disks, so we can pick
|
||||
// the first meta from online disk
|
||||
|
|
|
@ -504,7 +504,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
|
|||
|
||||
// count the number of offline disks
|
||||
offline := 0
|
||||
for i := 0; i < len(errs); i++ {
|
||||
for i := range len(errs) {
|
||||
var found bool
|
||||
switch {
|
||||
case errors.Is(errs[i], errDiskNotFound):
|
||||
|
@ -1221,7 +1221,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
|
|||
partsMetadata[index].SetInlineData()
|
||||
}
|
||||
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
// Object info is the same in all disks, so we can pick
|
||||
// the first meta from online disk
|
||||
|
@ -1470,7 +1470,17 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
|||
actualSize = n
|
||||
}
|
||||
}
|
||||
if fi.Checksum == nil {
|
||||
// If ServerSideChecksum is wanted for this object, it takes precedence
|
||||
// over opts.WantChecksum.
|
||||
if opts.WantServerSideChecksumType.IsSet() {
|
||||
serverSideChecksum := r.RawServerSideChecksumResult()
|
||||
if serverSideChecksum != nil {
|
||||
fi.Checksum = serverSideChecksum.AppendTo(nil, nil)
|
||||
if opts.EncryptFn != nil {
|
||||
fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum)
|
||||
}
|
||||
}
|
||||
} else if fi.Checksum == nil && opts.WantChecksum != nil {
|
||||
// Trailing headers checksums should now be filled.
|
||||
fi.Checksum = opts.WantChecksum.AppendTo(nil, nil)
|
||||
if opts.EncryptFn != nil {
|
||||
|
@ -1557,7 +1567,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
|||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
// Object info is the same in all disks, so we can pick
|
||||
// the first meta from online disk
|
||||
|
@ -1574,7 +1584,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
|||
if len(versions) == 0 {
|
||||
// Whether a disk was initially or becomes offline
|
||||
// during this upload, send it to the MRF list.
|
||||
for i := 0; i < len(onlineDisks); i++ {
|
||||
for i := range len(onlineDisks) {
|
||||
if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -149,7 +149,7 @@ func (z *erasureServerPools) findIndex(index int) int {
|
|||
if z.rebalMeta == nil {
|
||||
return 0
|
||||
}
|
||||
for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
|
||||
for i := range len(z.rebalMeta.PoolStats) {
|
||||
if i == index {
|
||||
return index
|
||||
}
|
||||
|
|
|
@ -1340,12 +1340,15 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
|
|||
}
|
||||
|
||||
putOpts := ObjectOptions{
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
NoLock: true,
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
NoLock: true,
|
||||
EncryptFn: dstOpts.EncryptFn,
|
||||
WantChecksum: dstOpts.WantChecksum,
|
||||
WantServerSideChecksumType: dstOpts.WantServerSideChecksumType,
|
||||
}
|
||||
|
||||
return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
|
||||
|
@ -1530,10 +1533,8 @@ func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, pre
|
|||
}
|
||||
|
||||
if loi.IsTruncated && merged.lastSkippedEntry > loi.NextMarker {
|
||||
// An object hidden by ILM was found during a truncated listing. Since the number of entries
|
||||
// fetched from drives is limited by max-keys, we should use the last ILM filtered entry
|
||||
// as a continuation token if it is lexially higher than the last visible object so that the
|
||||
// next call of WalkDir() with the max-keys can reach new objects not seen previously.
|
||||
// An object hidden by ILM was found during a truncated listing. Set the next marker
|
||||
// as the last skipped entry if it is lexically higher than loi.NextMarker, as an optimization.
|
||||
loi.NextMarker = merged.lastSkippedEntry
|
||||
}
|
||||
|
||||
|
@ -1711,7 +1712,9 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
|
|||
}
|
||||
|
||||
z.mpCache.Range(func(_ string, mp MultipartInfo) bool {
|
||||
poolResult.Uploads = append(poolResult.Uploads, mp)
|
||||
if mp.Bucket == bucket {
|
||||
poolResult.Uploads = append(poolResult.Uploads, mp)
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(poolResult.Uploads, func(i int, j int) bool {
|
||||
|
|
|
@ -95,7 +95,7 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI {
|
|||
s.erasureDisksMu.RLock()
|
||||
defer s.erasureDisksMu.RUnlock()
|
||||
|
||||
for i := 0; i < s.setCount; i++ {
|
||||
for i := range s.setCount {
|
||||
for j := 0; j < s.setDriveCount; j++ {
|
||||
disk := s.erasureDisks[i][j]
|
||||
if disk == OfflineDisk {
|
||||
|
@ -150,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
|
|||
if diskID == offlineDiskUUID {
|
||||
return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID)
|
||||
}
|
||||
for i := 0; i < len(refFormat.Erasure.Sets); i++ {
|
||||
for i := range len(refFormat.Erasure.Sets) {
|
||||
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
|
||||
if refFormat.Erasure.Sets[i][j] == diskID {
|
||||
return i, j, nil
|
||||
|
@ -174,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
|
|||
return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This)
|
||||
}
|
||||
|
||||
for i := 0; i < len(refFormat.Erasure.Sets); i++ {
|
||||
for i := range len(refFormat.Erasure.Sets) {
|
||||
for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
|
||||
if refFormat.Erasure.Sets[i][j] == format.Erasure.This {
|
||||
return i, j, nil
|
||||
|
@ -377,7 +377,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
|
|||
|
||||
mutex := newNSLock(globalIsDistErasure)
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
|
||||
}
|
||||
|
||||
|
@ -390,7 +390,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
|
|||
|
||||
var wg sync.WaitGroup
|
||||
var lk sync.Mutex
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
lockerEpSet := set.NewStringSet()
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
wg.Add(1)
|
||||
|
@ -409,7 +409,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
|
|||
}
|
||||
wg.Wait()
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
|
@ -868,11 +868,14 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
|
|||
}
|
||||
|
||||
putOpts := ObjectOptions{
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
ServerSideEncryption: dstOpts.ServerSideEncryption,
|
||||
UserDefined: srcInfo.UserDefined,
|
||||
Versioned: dstOpts.Versioned,
|
||||
VersionID: dstOpts.VersionID,
|
||||
MTime: dstOpts.MTime,
|
||||
EncryptFn: dstOpts.EncryptFn,
|
||||
WantChecksum: dstOpts.WantChecksum,
|
||||
WantServerSideChecksumType: dstOpts.WantServerSideChecksumType,
|
||||
}
|
||||
|
||||
return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
|
||||
|
|
|
@ -98,7 +98,7 @@ func fmtGenMain(ctxt *cli.Context) {
|
|||
setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet
|
||||
format := newFormatErasureV3(setCount, setDriveCount)
|
||||
format.ID = deploymentID
|
||||
for i := 0; i < setCount; i++ { // for each erasure set
|
||||
for i := range setCount { // for each erasure set
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
newFormat := format.Clone()
|
||||
newFormat.Erasure.This = format.Erasure.Sets[i][j]
|
||||
|
|
|
@ -157,7 +157,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
|
|||
format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3
|
||||
format.Erasure.Sets = make([][]string, numSets)
|
||||
|
||||
for i := 0; i < numSets; i++ {
|
||||
for i := range numSets {
|
||||
format.Erasure.Sets[i] = make([]string, setLen)
|
||||
for j := 0; j < setLen; j++ {
|
||||
format.Erasure.Sets[i][j] = mustGetUUID()
|
||||
|
@ -514,7 +514,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
|
|||
}
|
||||
|
||||
// Make sure that the diskID is found in the set.
|
||||
for i := 0; i < len(tmpFormat.Erasure.Sets); i++ {
|
||||
for i := range len(tmpFormat.Erasure.Sets) {
|
||||
for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ {
|
||||
if this == tmpFormat.Erasure.Sets[i][j] {
|
||||
return nil
|
||||
|
@ -639,7 +639,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; i < setCount; i++ {
|
||||
for i := range setCount {
|
||||
hostCount := make(map[string]int, setDriveCount)
|
||||
for j := 0; j < setDriveCount; j++ {
|
||||
disk := storageDisks[i*setDriveCount+j]
|
||||
|
|
10
cmd/grid.go
10
cmd/grid.go
|
@ -22,7 +22,7 @@ import (
|
|||
"crypto/tls"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/grid"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/rest"
|
||||
|
@ -52,8 +52,8 @@ func initGlobalGrid(ctx context.Context, eps EndpointServerPools) error {
|
|||
newCachedAuthToken(),
|
||||
&tls.Config{
|
||||
RootCAs: globalRootCAs,
|
||||
CipherSuites: fips.TLSCiphers(),
|
||||
CurvePreferences: fips.TLSCurveIDs(),
|
||||
CipherSuites: crypto.TLSCiphers(),
|
||||
CurvePreferences: crypto.TLSCurveIDs(),
|
||||
}),
|
||||
Local: local,
|
||||
Hosts: hosts,
|
||||
|
@ -85,8 +85,8 @@ func initGlobalLockGrid(ctx context.Context, eps EndpointServerPools) error {
|
|||
newCachedAuthToken(),
|
||||
&tls.Config{
|
||||
RootCAs: globalRootCAs,
|
||||
CipherSuites: fips.TLSCiphers(),
|
||||
CurvePreferences: fips.TLSCurveIDs(),
|
||||
CipherSuites: crypto.TLSCiphers(),
|
||||
CurvePreferences: crypto.TLSCurveIDs(),
|
||||
}, grid.RouteLockPath),
|
||||
Local: local,
|
||||
Hosts: hosts,
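The grid hunks above swap `fips.TLSCiphers()`/`fips.TLSCurveIDs()` for their counterparts in MinIO's internal `crypto` package when building the grid's `tls.Config`. As a generic illustration only (standard-library constants, not MinIO's actual cipher list), restricting cipher suites and curve preferences looks like this:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// newRestrictedTLSConfig shows the general shape: an explicit allow-list of
// TLS 1.2 cipher suites and preferred curves. TLS 1.3 suites are not listed
// because crypto/tls does not allow configuring them.
func newRestrictedTLSConfig() *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		},
		CurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},
	}
}

func main() {
	cfg := newRestrictedTLSConfig()
	fmt.Printf("%d cipher suites, %d curves\n", len(cfg.CipherSuites), len(cfg.CurvePreferences))
}
```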
|
||||
|
|
|
@ -152,6 +152,10 @@ func encLogIf(ctx context.Context, err error, errKind ...interface{}) {
|
|||
logger.LogIf(ctx, "encryption", err, errKind...)
|
||||
}
|
||||
|
||||
func encLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
|
||||
logger.LogOnceIf(ctx, "encryption", err, id, errKind...)
|
||||
}
|
||||
|
||||
func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
|
||||
logger.LogIf(ctx, "storage", err, errKind...)
|
||||
}
|
||||
|
|
|
@ -223,7 +223,11 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
|
|||
|
||||
go func(o listPathOptions) {
|
||||
defer wg.Done()
|
||||
o.StopDiskAtLimit = true
|
||||
if o.Lifecycle == nil {
|
||||
// No filtering ahead, ask drives to stop
|
||||
// listing exactly at a specific limit.
|
||||
o.StopDiskAtLimit = true
|
||||
}
|
||||
listErr = z.listMerged(listCtx, o, filterCh)
|
||||
o.debugln("listMerged returned with", listErr)
|
||||
}(*o)
|
||||
|
@ -422,6 +426,9 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions
|
|||
go func() {
|
||||
var returned bool
|
||||
for entry := range inCh {
|
||||
if o.shouldSkip(ctx, entry) {
|
||||
continue
|
||||
}
|
||||
if !returned {
|
||||
funcReturnedMu.Lock()
|
||||
returned = funcReturned
|
||||
|
|
|
@ -174,6 +174,31 @@ func (o *listPathOptions) debugln(data ...interface{}) {
|
|||
}
|
||||
}
|
||||
|
||||
func (o *listPathOptions) shouldSkip(ctx context.Context, entry metaCacheEntry) (yes bool) {
|
||||
if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) {
|
||||
return true
|
||||
}
|
||||
if o.Marker != "" && entry.name < o.Marker {
|
||||
return true
|
||||
}
|
||||
if !strings.HasPrefix(entry.name, o.Prefix) {
|
||||
return true
|
||||
}
|
||||
if o.Separator != "" && entry.isDir() && !strings.Contains(strings.TrimPrefix(entry.name, o.Prefix), o.Separator) {
|
||||
return true
|
||||
}
|
||||
if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
|
||||
return true
|
||||
}
|
||||
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() {
|
||||
return true
|
||||
}
|
||||
if o.Lifecycle != nil || o.Replication.Config != nil {
|
||||
return triggerExpiryAndRepl(ctx, *o, entry)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// gatherResults will collect all results on the input channel and filter results according
|
||||
// to the options or to the current bucket ILM expiry rules.
|
||||
// Caller should close the channel when done.
|
||||
|
@ -199,27 +224,10 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache
|
|||
resCh = nil
|
||||
continue
|
||||
}
|
||||
if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) {
|
||||
if yes := o.shouldSkip(ctx, entry); yes {
|
||||
results.lastSkippedEntry = entry.name
|
||||
continue
|
||||
}
|
||||
if o.Marker != "" && entry.name < o.Marker {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(entry.name, o.Prefix) {
|
||||
continue
|
||||
}
|
||||
if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) {
|
||||
continue
|
||||
}
|
||||
if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() {
|
||||
continue
|
||||
}
|
||||
if o.Lifecycle != nil || o.Replication.Config != nil {
|
||||
if skipped := triggerExpiryAndRepl(ctx, *o, entry); skipped {
|
||||
results.lastSkippedEntry = entry.name
|
||||
continue
|
||||
}
|
||||
}
|
||||
if o.Limit > 0 && results.len() >= o.Limit {
|
||||
// We have enough and we have more.
|
||||
// Do not return io.EOF
|
||||
|
|
|
@ -19,6 +19,7 @@ package cmd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
@ -68,6 +69,7 @@ const (
|
|||
// WalkDir will traverse a directory and return all entries found.
|
||||
// On success a sorted meta cache stream will be returned.
|
||||
// Metadata has data stripped, if any.
|
||||
// The function quits as soon as the context is canceled to avoid further drive IO.
|
||||
func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) {
|
||||
legacyFS := s.fsType != xfs && s.fsType != ext4
|
||||
|
||||
|
@ -146,6 +148,13 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
|
|||
var scanDir func(path string) error
|
||||
|
||||
scanDir = func(current string) error {
|
||||
if contextCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
if opts.Limit > 0 && objsReturned >= opts.Limit {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip forward, if requested...
|
||||
sb := bytebufferpool.Get()
|
||||
defer func() {
|
||||
|
@ -161,12 +170,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
|
|||
forward = forward[:idx]
|
||||
}
|
||||
}
|
||||
if contextCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
if opts.Limit > 0 && objsReturned >= opts.Limit {
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.walkMu != nil {
|
||||
s.walkMu.Lock()
|
||||
|
@ -197,6 +200,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
|
|||
// Avoid a bunch of cleanup when joining.
|
||||
current = strings.Trim(current, SlashSeparator)
|
||||
for i, entry := range entries {
|
||||
if contextCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
if opts.Limit > 0 && objsReturned >= opts.Limit {
|
||||
return nil
|
||||
}
|
||||
|
@ -292,15 +298,15 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
|
|||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if contextCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
if opts.Limit > 0 && objsReturned >= opts.Limit {
|
||||
return nil
|
||||
}
|
||||
if entry == "" {
|
||||
continue
|
||||
}
|
||||
if contextCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
meta := metaCacheEntry{name: pathJoinBuf(sb, current, entry)}
|
||||
|
||||
// If directory entry on stack before this, pop it now.
|
||||
|
@ -314,7 +320,10 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
|
|||
if opts.Recursive {
|
||||
// Scan folder we found. Should be in correct sort order where we are.
|
||||
err := scanDir(pop)
|
||||
if err != nil && !IsErrIgnored(err, context.Canceled) {
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return err
|
||||
}
|
||||
internalLogIf(ctx, err)
|
||||
}
|
||||
}
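The WalkDir hunks above move the `contextCanceled` checks to the top of `scanDir` and of each entry loop, so a walk stops promptly once the caller gives up. A generic, self-contained sketch of that pattern using only the standard library (not MinIO's WalkDir; names are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// walk visits every entry under dir, checking for cancellation at the start
// of each recursion and each loop iteration, mirroring the reordering above.
func walk(ctx context.Context, dir string, visit func(string)) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if err := ctx.Err(); err != nil {
			return err
		}
		p := filepath.Join(dir, e.Name())
		visit(p)
		if e.IsDir() {
			if err := walk(ctx, p, visit); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := walk(ctx, ".", func(p string) { fmt.Println(p) })
	fmt.Println("walk finished:", err)
}
```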
|
||||
|
|
|
@ -49,61 +49,61 @@ const (
|
|||
var (
|
||||
bucketReplLastHrFailedBytesMD = NewGaugeMD(bucketReplLastHrFailedBytes,
|
||||
"Total number of bytes failed at least once to replicate in the last hour on a bucket",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplLastHrFailedCountMD = NewGaugeMD(bucketReplLastHrFailedCount,
|
||||
"Total number of objects which failed replication in the last hour on a bucket",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplLastMinFailedBytesMD = NewGaugeMD(bucketReplLastMinFailedBytes,
|
||||
"Total number of bytes failed at least once to replicate in the last full minute on a bucket",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplLastMinFailedCountMD = NewGaugeMD(bucketReplLastMinFailedCount,
|
||||
"Total number of objects which failed replication in the last full minute on a bucket",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplLatencyMsMD = NewGaugeMD(bucketReplLatencyMs,
|
||||
"Replication latency on a bucket in milliseconds",
|
||||
bucketL, operationL, rangeL, targetArnL)
|
||||
bucketReplProxiedDeleteTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsTotal,
|
||||
"Number of DELETE tagging requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedGetRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetRequestsFailures,
|
||||
"Number of failures in GET requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedGetRequestsTotalMD = NewCounterMD(bucketReplProxiedGetRequestsTotal,
|
||||
"Number of GET requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedGetTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsFailures,
|
||||
"Number of failures in GET tagging requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedGetTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsTotal,
|
||||
"Number of GET tagging requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedHeadRequestsFailuresMD = NewCounterMD(bucketReplProxiedHeadRequestsFailures,
|
||||
"Number of failures in HEAD requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedHeadRequestsTotalMD = NewCounterMD(bucketReplProxiedHeadRequestsTotal,
|
||||
"Number of HEAD requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedPutTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsFailures,
|
||||
"Number of failures in PUT tagging requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedPutTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsTotal,
|
||||
"Number of PUT tagging requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplSentBytesMD = NewCounterMD(bucketReplSentBytes,
|
||||
"Total number of bytes replicated to the target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplSentCountMD = NewCounterMD(bucketReplSentCount,
|
||||
"Total number of objects replicated to the target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplTotalFailedBytesMD = NewCounterMD(bucketReplTotalFailedBytes,
|
||||
"Total number of bytes failed at least once to replicate since server start",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplTotalFailedCountMD = NewCounterMD(bucketReplTotalFailedCount,
|
||||
"Total number of objects which failed replication since server start",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
bucketReplProxiedDeleteTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsFailures,
|
||||
"Number of failures in DELETE tagging requests proxied to replication target",
|
||||
bucketL)
|
||||
bucketL, targetArnL)
|
||||
)
|
||||
|
||||
// loadBucketReplicationMetrics - `BucketMetricsLoaderFn` for bucket replication metrics
|
||||
|
@ -121,11 +121,11 @@ func loadBucketReplicationMetrics(ctx context.Context, m MetricValues, c *metric
|
|||
|
||||
bucketReplStats := globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage)
|
||||
for _, bucket := range buckets {
|
||||
labels := []string{bucketL, bucket}
|
||||
if s, ok := bucketReplStats[bucket]; ok {
|
||||
stats := s.ReplicationStats
|
||||
if stats.hasReplicationUsage() {
|
||||
for arn, stat := range stats.Stats {
|
||||
labels := []string{bucketL, bucket, targetArnL, arn}
|
||||
m.Set(bucketReplLastHrFailedBytes, float64(stat.Failed.LastHour.Bytes), labels...)
|
||||
m.Set(bucketReplLastHrFailedCount, float64(stat.Failed.LastHour.Count), labels...)
|
||||
m.Set(bucketReplLastMinFailedBytes, float64(stat.Failed.LastMinute.Bytes), labels...)
|
||||
|
|
|
@ -266,7 +266,7 @@ func (m *mrfState) healRoutine(z *erasureServerPools) {
|
|||
if len(u.Versions) > 0 {
|
||||
vers := len(u.Versions) / 16
|
||||
if vers > 0 {
|
||||
for i := 0; i < vers; i++ {
|
||||
for i := range vers {
|
||||
healObject(u.Bucket, u.Object, uuid.UUID(u.Versions[16*i:]).String(), scan)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -123,7 +123,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
|
|||
}
|
||||
|
||||
retryCount := g.retryCount
|
||||
for i := 0; i < retryCount; i++ {
|
||||
for i := range retryCount {
|
||||
g.errs[index].Err = nil
|
||||
if err := f(); err != nil {
|
||||
g.errs[index].Err = err
|
||||
|
|
|
@ -654,6 +654,7 @@ type objectAttributesChecksum struct {
|
|||
ChecksumSHA1 string `xml:",omitempty"`
|
||||
ChecksumSHA256 string `xml:",omitempty"`
|
||||
ChecksumCRC64NVME string `xml:",omitempty"`
|
||||
ChecksumType string `xml:",omitempty"`
|
||||
}
|
||||
|
||||
type objectAttributesParts struct {
|
||||
|
|
|
@ -86,6 +86,8 @@ type ObjectOptions struct {
|
|||
|
||||
WantChecksum *hash.Checksum // x-amz-checksum-XXX checksum sent to PutObject/ CompleteMultipartUpload.
|
||||
|
||||
WantServerSideChecksumType hash.ChecksumType // if set, we compute a server-side checksum of this type
|
||||
|
||||
NoDecryption bool // indicates if the stream must be decrypted.
|
||||
PreserveETag string // preserves this etag during a PUT call.
|
||||
NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
|
||||
|
|
|
@ -26,6 +26,9 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/bucket/lifecycle"
|
||||
)
|
||||
|
||||
func TestListObjectsVersionedFolders(t *testing.T) {
|
||||
|
@ -1929,3 +1932,121 @@ func BenchmarkListObjects(b *testing.B) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestListObjectsWithILM(t *testing.T) {
|
||||
ExecObjectLayerTest(t, testListObjectsWithILM)
|
||||
}
|
||||
|
||||
func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
|
||||
// Prepare lifecycle expiration workers
|
||||
es := newExpiryState(t1.Context(), obj, 0)
|
||||
globalExpiryState = es
|
||||
|
||||
t, _ := t1.(*testing.T)
|
||||
|
||||
objContent := "test-content"
|
||||
objMd5 := md5.Sum([]byte(objContent))
|
||||
|
||||
uploads := []struct {
|
||||
bucket string
|
||||
expired int
|
||||
notExpired int
|
||||
}{
|
||||
{"test-list-ilm-nothing-expired", 0, 6},
|
||||
{"test-list-ilm-all-expired", 6, 0},
|
||||
{"test-list-ilm-all-half-expired", 3, 3},
|
||||
}
|
||||
|
||||
oneWeekAgo := time.Now().Add(-7 * 24 * time.Hour)
|
||||
|
||||
lifecycleBytes := []byte(`
|
||||
<LifecycleConfiguration>
|
||||
<Rule>
|
||||
<Status>Enabled</Status>
|
||||
<Expiration>
|
||||
<Days>1</Days>
|
||||
</Expiration>
|
||||
</Rule>
|
||||
</LifecycleConfiguration>
|
||||
`)
|
||||
|
||||
lifecycleConfig, err := lifecycle.ParseLifecycleConfig(bytes.NewReader(lifecycleBytes))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i, upload := range uploads {
|
||||
err := obj.MakeBucket(context.Background(), upload.bucket, MakeBucketOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
}
|
||||
|
||||
metadata, err := globalBucketMetadataSys.Get(upload.bucket)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
metadata.lifecycleConfig = lifecycleConfig
|
||||
globalBucketMetadataSys.Set(upload.bucket, metadata)
|
||||
defer globalBucketMetadataSys.Remove(upload.bucket)
|
||||
|
||||
// Upload objects with modtime set to one week ago; these should be expired by ILM
|
||||
for range upload.expired {
|
||||
_, err := obj.PutObject(context.Background(), upload.bucket, randString(32),
|
||||
mustGetPutObjReader(t,
|
||||
bytes.NewBufferString(objContent),
|
||||
int64(len(objContent)),
|
||||
hex.EncodeToString(objMd5[:]),
|
||||
""),
|
||||
ObjectOptions{MTime: oneWeekAgo},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Upload objects with the current time as modtime; these should not be expired by ILM
|
||||
for range upload.notExpired {
|
||||
_, err := obj.PutObject(context.Background(), upload.bucket, randString(32),
|
||||
mustGetPutObjReader(t,
|
||||
bytes.NewBufferString(objContent),
|
||||
int64(len(objContent)),
|
||||
hex.EncodeToString(objMd5[:]),
|
||||
""),
|
||||
ObjectOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, maxKeys := range []int{1, 10, 49} {
|
||||
// Test ListObjects V2
|
||||
totalObjs, didRuns := 0, 0
|
||||
marker := ""
|
||||
for {
|
||||
didRuns++
|
||||
if didRuns > 1000 {
|
||||
t.Fatal("too many runs")
|
||||
return
|
||||
}
|
||||
result, err := obj.ListObjectsV2(context.Background(), upload.bucket, "", marker, "", maxKeys, false, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i, instanceType, err.Error())
|
||||
}
|
||||
totalObjs += len(result.Objects)
|
||||
if !result.IsTruncated {
|
||||
break
|
||||
}
|
||||
if marker != "" && marker == result.NextContinuationToken {
|
||||
t.Fatalf("infinite loop marker: %s", result.NextContinuationToken)
|
||||
}
|
||||
marker = result.NextContinuationToken
|
||||
}
|
||||
|
||||
if totalObjs != upload.notExpired {
|
||||
t.Fatalf("Test %d: %s: max-keys=%d, %d objects are expected to be seen, but %d found instead (didRuns=%d)",
|
||||
i+1, instanceType, maxKeys, upload.notExpired, totalObjs, didRuns)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -128,7 +128,7 @@ func IsValidBucketName(bucket string) bool {
        // 'label' in AWS terminology and if the bucket looks
        // like an IP address.
        isNotNumber := false
-       for i := 0; i < len(piece); i++ {
+       for i := range len(piece) {
            switch {
            case (piece[i] >= 'a' && piece[i] <= 'z' ||
                piece[i] == '-'):
@@ -254,11 +254,11 @@ func concat(ss ...string) string {
    }
    // create & allocate the memory in advance.
    n := 0
-   for i := 0; i < length; i++ {
+   for i := range length {
        n += len(ss[i])
    }
    b := make([]byte, 0, n)
-   for i := 0; i < length; i++ {
+   for i := range length {
        b = append(b, ss[i]...)
    }
    return unsafe.String(unsafe.SliceData(b), n)
@@ -1096,6 +1096,16 @@ func NewPutObjReader(rawReader *hash.Reader) *PutObjReader {
    return &PutObjReader{Reader: rawReader, rawReader: rawReader}
}

+// RawServerSideChecksumResult returns the ServerSideChecksumResult from the
+// underlying rawReader, since the PutObjReader might be encrypted data and
+// thus any checksum from that would be incorrect.
+func (p *PutObjReader) RawServerSideChecksumResult() *hash.Checksum {
+   if p.rawReader != nil {
+       return p.rawReader.ServerSideChecksumResult
+   }
+   return nil
+}
+
func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
    var emptyKey [32]byte
    if bytes.Equal(encKey[:], emptyKey[:]) {
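Why the result is read from the raw reader rather than the outer one: with SSE enabled the outer `PutObjReader` streams ciphertext, so a checksum taken there would not describe the client's plaintext. A hedged usage sketch (the variable name is illustrative):

```go
// pReader is a *PutObjReader built by NewPutObjReader(rawReader) earlier in the
// upload path. The raw reader saw the plaintext, so its result (if any) is the
// checksum of what the client actually sent; nil means no server-side hasher was attached.
plainChecksum := pReader.RawServerSideChecksumResult()
```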
@@ -641,6 +641,7 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj
            ChecksumSHA1:      strings.Split(chkSums["SHA1"], "-")[0],
            ChecksumSHA256:    strings.Split(chkSums["SHA256"], "-")[0],
            ChecksumCRC64NVME: strings.Split(chkSums["CRC64NVME"], "-")[0],
+           ChecksumType:      chkSums[xhttp.AmzChecksumType],
        }
    }
}
@@ -1465,6 +1466,46 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
        targetSize, _ = srcInfo.DecryptedSize()
    }

+   // Client can request that a different type of checksum is computed server-side for the
+   // destination object using the x-amz-checksum-algorithm header.
+   headerChecksumType := hash.NewChecksumHeader(r.Header)
+   if headerChecksumType.IsSet() {
+       dstOpts.WantServerSideChecksumType = headerChecksumType.Base()
+       srcInfo.Reader.AddServerSideChecksumHasher(headerChecksumType)
+       dstOpts.WantChecksum = nil
+   } else {
+       // Check the source object for checksum.
+       // If Checksum is not encrypted, decryptChecksum will be a no-op and return
+       // the already unencrypted value.
+       srcChecksumDecrypted, err := srcInfo.decryptChecksum(r.Header)
+       if err != nil {
+           encLogOnceIf(GlobalContext,
+               fmt.Errorf("Unable to decryptChecksum for object: %s/%s, error: %w", srcBucket, srcObject, err),
+               "copy-object-decrypt-checksums-"+srcBucket+srcObject)
+       }
+
+       // The source object has a checksum set, so the destination needs one too.
+       if srcChecksumDecrypted != nil {
+           dstOpts.WantChecksum = hash.ChecksumFromBytes(srcChecksumDecrypted)
+
+           // When an object is being copied from a source that is multipart, the destination will
+           // no longer be multipart, and thus the checksum becomes full-object instead. Since
+           // the CopyObject API does not require that the caller send us this final checksum, we need
+           // to compute it server-side, with the same type as the source object.
+           if dstOpts.WantChecksum != nil && dstOpts.WantChecksum.Type.IsMultipartComposite() {
+               dstOpts.WantServerSideChecksumType = dstOpts.WantChecksum.Type.Base()
+               srcInfo.Reader.AddServerSideChecksumHasher(dstOpts.WantServerSideChecksumType)
+               dstOpts.WantChecksum = nil
+           }
+       } else {
+           // S3: copied objects with no source checksum and no requested destination checksum
+           // algorithm automatically gain a CRC-64NVME checksum.
+           dstOpts.WantServerSideChecksumType = hash.ChecksumCRC64NVME
+           srcInfo.Reader.AddServerSideChecksumHasher(dstOpts.WantServerSideChecksumType)
+           dstOpts.WantChecksum = nil
+       }
+   }
+
    if isTargetEncrypted {
        var encReader io.Reader
        kind, _ := crypto.IsRequested(r.Header)
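The block above encodes a three-way decision. A condensed, illustrative helper (not the actual code path; the function name and shape are hypothetical) that captures the same rules:

```go
// pickDestChecksum mirrors the CopyObject logic above: a requested algorithm wins,
// a composite multipart source checksum is recomputed as full-object, an intact
// source checksum is carried over, and otherwise CRC-64NVME is computed server-side.
// A zero ChecksumType return value simply means "no server-side checksum needed".
func pickDestChecksum(requested hash.ChecksumType, src *hash.Checksum) (serverSide hash.ChecksumType, want *hash.Checksum) {
	switch {
	case requested.IsSet():
		return requested.Base(), nil
	case src != nil && src.Type.IsMultipartComposite():
		return src.Type.Base(), nil
	case src != nil:
		return 0, src
	default:
		return hash.ChecksumCRC64NVME, nil
	}
}
```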
@@ -1498,6 +1539,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
            if dstOpts.IndexCB != nil {
                dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
            }
            dstOpts.EncryptFn = metadataEncrypter(objEncKey)
        }
    }

@@ -1633,6 +1675,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
        return
    }

+   // After we've checked for an invalid copy (above), if a server-side checksum type
+   // is requested, we need to read the source to recompute the checksum.
+   if dstOpts.WantServerSideChecksumType.IsSet() {
+       srcInfo.metadataOnly = false
+   }
+
    // Federation only.
    remoteCallRequired := isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI)

    var objInfo ObjectInfo
@@ -42,7 +42,6 @@ import (
    "github.com/minio/minio/internal/crypto"
    "github.com/minio/minio/internal/etag"
    "github.com/minio/minio/internal/event"
-   "github.com/minio/minio/internal/fips"
    "github.com/minio/minio/internal/handlers"
    "github.com/minio/minio/internal/hash"
    "github.com/minio/minio/internal/hash/sha256"
@@ -527,9 +526,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt

        partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
        encReader, err := sio.EncryptReader(reader, sio.Config{
-           Key:          partEncryptionKey[:],
-           CipherSuites: fips.DARECiphers(),
-           Nonce:        &nonce,
+           Key:   partEncryptionKey[:],
+           Nonce: &nonce,
        })
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -825,9 +823,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
        copy(nonce[:], tmp[:12])

        reader, err = sio.EncryptReader(in, sio.Config{
-           Key:          partEncryptionKey[:],
-           CipherSuites: fips.DARECiphers(),
-           Nonce:        &nonce,
+           Key:   partEncryptionKey[:],
+           Nonce: &nonce,
        })
        if err != nil {
            writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -77,7 +77,7 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) {
func setupTestReadDirFiles(t *testing.T) (testResults []result) {
    dir := t.TempDir()
    entries := []string{}
-   for i := 0; i < 10; i++ {
+   for i := range 10 {
        name := fmt.Sprintf("file-%d", i)
        if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
            // For cleanup, it's required to add these entries into test results.
@@ -102,7 +102,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) {
        t.Fatalf("Unable to create prefix directory \"mydir\", %s", err)
    }
    entries := []string{"mydir/"}
-   for i := 0; i < 10; i++ {
+   for i := range 10 {
        name := fmt.Sprintf("file-%d", i)
        if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
            // For cleanup, it's required to add these entries into test results.
@@ -126,7 +126,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) {
    }
    dir := t.TempDir()
    entries := []string{}
-   for i := 0; i < 10; i++ {
+   for i := range 10 {
        name1 := fmt.Sprintf("file-%d", i)
        name2 := fmt.Sprintf("file-%d", i+10)
        if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {
@@ -452,7 +452,9 @@ func initAllSubsystems(ctx context.Context) {
    globalNotificationSys = NewNotificationSys(globalEndpoints)

    // Create new notification system
-   globalEventNotifier = NewEventNotifier(GlobalContext)
+   if globalEventNotifier == nil {
+       globalEventNotifier = NewEventNotifier(GlobalContext)
+   }

    // Create new bucket metadata system.
    if globalBucketMetadataSys == nil {
@@ -102,7 +102,7 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT
    var totalUploadTimes madmin.TimeDurations
    var totalDownloadTimes madmin.TimeDurations
    var totalDownloadTTFB madmin.TimeDurations
-   for i := 0; i < len(throughputHighestResults); i++ {
+   for i := range len(throughputHighestResults) {
        errStr := ""
        if throughputHighestResults[i].Error != "" {
            errStr = throughputHighestResults[i].Error
@@ -675,7 +675,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
    versions := make([]FileInfoVersions, totalVersions)
    decoder := msgpNewReader(r.Body)
    defer readMsgpReaderPoolPut(decoder)
-   for i := 0; i < totalVersions; i++ {
+   for i := range totalVersions {
        dst := &versions[i]
        if err := dst.DecodeMsg(decoder); err != nil {
            s.writeErrorResponse(w, err)
@@ -50,8 +50,13 @@ const (
    updateTimeout = 10 * time.Second
)

-// For windows our files have .exe additionally.
-var minioReleaseWindowsInfoURL = MinioReleaseURL + "minio.exe.sha256sum"
+var (
+   // Newer official download info URLs appear earlier below.
+   minioReleaseInfoURL = MinioReleaseURL + "minio.sha256sum"
+
+   // For windows our files have .exe additionally.
+   minioReleaseWindowsInfoURL = MinioReleaseURL + "minio.exe.sha256sum"
+)

// minioVersionToReleaseTime - parses a standard official release
// MinIO version string.
@ -1,24 +0,0 @@
|
|||
//go:build fips
|
||||
// +build fips
|
||||
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Newer official download info URLs appear earlier below.
|
||||
var minioReleaseInfoURL = MinioReleaseURL + "minio.fips.sha256sum"
|
|
@ -1,24 +0,0 @@
|
|||
//go:build !fips
|
||||
// +build !fips
|
||||
|
||||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
|
||||
|
||||
// Newer official download info URLs appear earlier below.
|
||||
var minioReleaseInfoURL = MinioReleaseURL + "minio.sha256sum"
|
cmd/utils.go
@@ -52,7 +52,7 @@ import (
    "github.com/minio/minio/internal/config/api"
    xtls "github.com/minio/minio/internal/config/identity/tls"
    "github.com/minio/minio/internal/config/storageclass"
-   "github.com/minio/minio/internal/fips"
+   "github.com/minio/minio/internal/crypto"
    "github.com/minio/minio/internal/handlers"
    "github.com/minio/minio/internal/hash"
    xhttp "github.com/minio/minio/internal/http"
@@ -612,8 +612,8 @@ func NewInternodeHTTPTransport(maxIdleConnsPerHost int) func() http.RoundTripper
        LookupHost:       globalDNSCache.LookupHost,
        DialTimeout:      rest.DefaultTimeout,
        RootCAs:          globalRootCAs,
-       CipherSuites:     fips.TLSCiphers(),
-       CurvePreferences: fips.TLSCurveIDs(),
+       CipherSuites:     crypto.TLSCiphers(),
+       CurvePreferences: crypto.TLSCurveIDs(),
        EnableHTTP2:      false,
        TCPOptions:       globalTCPOptions,
    }.NewInternodeHTTPTransport(maxIdleConnsPerHost)
@@ -626,8 +626,8 @@ func NewHTTPTransportWithClientCerts(clientCert, clientKey string) http.RoundTri
        LookupHost:       globalDNSCache.LookupHost,
        DialTimeout:      defaultDialTimeout,
        RootCAs:          globalRootCAs,
-       CipherSuites:     fips.TLSCiphersBackwardCompatible(),
-       CurvePreferences: fips.TLSCurveIDs(),
+       CipherSuites:     crypto.TLSCiphersBackwardCompatible(),
+       CurvePreferences: crypto.TLSCurveIDs(),
        TCPOptions:       globalTCPOptions,
        EnableHTTP2:      false,
    }
@@ -665,8 +665,8 @@ func NewHTTPTransportWithTimeout(timeout time.Duration) *http.Transport {
        DialTimeout:      defaultDialTimeout,
        RootCAs:          globalRootCAs,
        TCPOptions:       globalTCPOptions,
-       CipherSuites:     fips.TLSCiphersBackwardCompatible(),
-       CurvePreferences: fips.TLSCurveIDs(),
+       CipherSuites:     crypto.TLSCiphersBackwardCompatible(),
+       CurvePreferences: crypto.TLSCurveIDs(),
        EnableHTTP2:      false,
    }.NewHTTPTransportWithTimeout(timeout)
}
@@ -677,8 +677,8 @@ func NewRemoteTargetHTTPTransport(insecure bool) func() *http.Transport {
    return xhttp.ConnSettings{
        LookupHost:       globalDNSCache.LookupHost,
        RootCAs:          globalRootCAs,
-       CipherSuites:     fips.TLSCiphersBackwardCompatible(),
-       CurvePreferences: fips.TLSCurveIDs(),
+       CipherSuites:     crypto.TLSCiphersBackwardCompatible(),
+       CurvePreferences: crypto.TLSCurveIDs(),
        TCPOptions:       globalTCPOptions,
        EnableHTTP2:      false,
    }.NewRemoteTargetHTTPTransport(insecure)
@@ -851,7 +851,7 @@ func lcp(strs []string, pre bool) string {
        // compare letters
        if pre {
            // prefix, iterate left to right
-           for i := 0; i < maxl; i++ {
+           for i := range maxl {
                if xfix[i] != str[i] {
                    xfix = xfix[:i]
                    break
@@ -859,7 +859,7 @@ func lcp(strs []string, pre bool) string {
            }
        } else {
            // suffix, iterate right to left
-           for i := 0; i < maxl; i++ {
+           for i := range maxl {
                xi := xfixl - i - 1
                si := strl - i - 1
                if xfix[xi] != str[si] {
@@ -986,11 +986,11 @@ func newTLSConfig(getCert certs.GetCertificateFunc) *tls.Config {
    }

    if secureCiphers := env.Get(api.EnvAPISecureCiphers, config.EnableOn) == config.EnableOn; secureCiphers {
-       tlsConfig.CipherSuites = fips.TLSCiphers()
+       tlsConfig.CipherSuites = crypto.TLSCiphers()
    } else {
-       tlsConfig.CipherSuites = fips.TLSCiphersBackwardCompatible()
+       tlsConfig.CipherSuites = crypto.TLSCiphersBackwardCompatible()
    }
-   tlsConfig.CurvePreferences = fips.TLSCurveIDs()
+   tlsConfig.CurvePreferences = crypto.TLSCurveIDs()
    return tlsConfig
}

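Every one of these call sites now goes through the internal `crypto` package instead of `fips`. A minimal sketch of the same wiring as it might appear in another file inside the repository (the function is illustrative, not an actual one; it assumes the usual `github.com/minio/minio/internal/crypto` and `crypto/tls` imports):

```go
// buildTLSConfig is illustrative only: it shows how the consolidated helpers
// plug into a tls.Config, mirroring what newTLSConfig does above.
func buildTLSConfig() *tls.Config {
	return &tls.Config{
		MinVersion:       tls.VersionTLS12,
		CipherSuites:     crypto.TLSCiphers(),
		CurvePreferences: crypto.TLSCurveIDs(),
	}
}
```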
@@ -163,6 +163,7 @@ func newWarmBackendS3(conf madmin.TierS3, tier string) (*warmBackendS3, error) {
        Creds:     creds,
        Secure:    u.Scheme == "https",
        Transport: globalRemoteTargetTransport,
+       Region:    conf.Region,
    }
    client, err := minio.New(u.Host, opts)
    if err != nil {
@@ -846,7 +846,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte,
// Any non-nil error is returned.
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
    var tHdr, tMeta []byte // Zero copy bytes
-   for i := 0; i < versions; i++ {
+   for i := range versions {
        tHdr, buf, err = msgp.ReadBytesZC(buf)
        if err != nil {
            return err
@@ -2976,7 +2976,7 @@ func (s *xlStorage) RenamePart(ctx context.Context, srcVolume, srcPath, dstVolum
            return errFileAccessDenied
        }
        err = osErrToFileErr(err)
-       if errors.Is(err, errFileNotFound) {
+       if errors.Is(err, errFileNotFound) || errors.Is(err, errFileAccessDenied) {
            return errUploadIDNotFound
        }
        return err
@@ -2,7 +2,7 @@ version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
-  image: quay.io/minio/minio:RELEASE.2025-04-22T22-12-26Z
+  image: quay.io/minio/minio:RELEASE.2025-06-13T11-33-47Z
  command: server --console-address ":9001" http://minio{1...4}/data{1...2}
  expose:
    - "9000"
go.mod
@@ -51,7 +51,7 @@ require (
    github.com/lithammer/shortuuid/v4 v4.2.0
    github.com/miekg/dns v1.1.65
    github.com/minio/cli v1.24.2
-   github.com/minio/console v1.7.7-0.20250507213720-ee974a59612f
+   github.com/minio/console v1.7.7-0.20250623221437-2595faf715ea
    github.com/minio/csvparser v1.0.0
    github.com/minio/dnscache v0.1.1
    github.com/minio/dperf v0.6.3
go.sum
@ -421,8 +421,8 @@ github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg=
|
|||
github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
|
||||
github.com/minio/colorjson v1.0.8 h1:AS6gEQ1dTRYHmC4xuoodPDRILHP/9Wz5wYUGDQfPLpg=
|
||||
github.com/minio/colorjson v1.0.8/go.mod h1:wrs39G/4kqNlGjwqHvPlAnXuc2tlPszo6JKdSBCLN8w=
|
||||
github.com/minio/console v1.7.7-0.20250507213720-ee974a59612f h1:J2mhcVQxlMy//n14KQEd2NAyQUk6qMJtBVO7Yp9ebSQ=
|
||||
github.com/minio/console v1.7.7-0.20250507213720-ee974a59612f/go.mod h1:Jxp/p3RZctdaavbfRrIirQLMPlZ4IFEjInE9lzDtFjI=
|
||||
github.com/minio/console v1.7.7-0.20250623221437-2595faf715ea h1:alek8HJEcLZBzlUiPf8dICAbx+B07TK6WITY2oZuJKY=
|
||||
github.com/minio/console v1.7.7-0.20250623221437-2595faf715ea/go.mod h1:hKNkzdKBKU84w5wXqMnkH74QocJGHW2zjvFtuGETDsc=
|
||||
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
|
||||
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU=
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
{{- $fullName := printf "%s-console" (include "minio.fullname" .) -}}
|
||||
{{- $servicePort := .Values.consoleService.port -}}
|
||||
{{- $ingressPath := .Values.consoleIngress.path -}}
|
||||
{{- $ingressPathType := .Values.consoleIngress.pathType -}}
|
||||
apiVersion: {{ template "minio.consoleIngress.apiVersion" . }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
|
@ -37,7 +38,7 @@ spec:
|
|||
paths:
|
||||
- path: {{ $ingressPath }}
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
pathType: Prefix
|
||||
pathType: {{ $ingressPathType }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
{{- $fullName := include "minio.fullname" . -}}
|
||||
{{- $servicePort := .Values.service.port -}}
|
||||
{{- $ingressPath := .Values.ingress.path -}}
|
||||
{{- $ingressPathType := .Values.ingress.pathType -}}
|
||||
apiVersion: {{ template "minio.ingress.apiVersion" . }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
|
@ -37,7 +38,7 @@ spec:
|
|||
paths:
|
||||
- path: {{ $ingressPath }}
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
pathType: Prefix
|
||||
pathType: {{ $ingressPathType }}
|
||||
backend:
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
|
|
|
@ -16,11 +16,51 @@ spec:
|
|||
ingress:
|
||||
- ports:
|
||||
- port: {{ .Values.minioAPIPort }}
|
||||
protocol: TCP
|
||||
- port: {{ .Values.minioConsolePort }}
|
||||
protocol: TCP
|
||||
{{- if not .Values.networkPolicy.allowExternal }}
|
||||
from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
{{ template "minio.name" . }}-client: "true"
|
||||
{{- end }}
|
||||
{{- if .Values.networkPolicy.egress.enabled }}
|
||||
egress:
|
||||
- ports:
|
||||
{{ .Values.networkPolicy.egress.ports | toJson }}
|
||||
{{- with .Values.networkPolicy.egress.to }}
|
||||
to:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
---
|
||||
kind: NetworkPolicy
|
||||
apiVersion: {{ template "minio.networkPolicy.apiVersion" . }}
|
||||
metadata:
|
||||
name: {{ template "minio.fullname" . }}-post-job
|
||||
labels:
|
||||
app: {{ template "minio.name" . }}-post-job
|
||||
chart: {{ template "minio.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: {{ template "minio.name" . }}-job
|
||||
release: {{ .Release.Name }}
|
||||
egress:
|
||||
- ports:
|
||||
- port: {{ .Values.minioAPIPort }}
|
||||
protocol: TCP
|
||||
- port: {{ .Values.minioConsolePort }}
|
||||
protocol: TCP
|
||||
{{- if .Values.networkPolicy.egress.enabled }}
|
||||
- ports:
|
||||
{{ .Values.networkPolicy.egress.ports | toJson }}
|
||||
{{- with .Values.networkPolicy.egress.to }}
|
||||
to:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -200,9 +200,11 @@ service:
|
|||
ingress:
|
||||
enabled: false
|
||||
ingressClassName: ~
|
||||
labels: {}
|
||||
labels:
|
||||
{}
|
||||
# node-role.kubernetes.io/ingress: platform
|
||||
annotations: {}
|
||||
annotations:
|
||||
{}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
# kubernetes.io/ingress.allow-http: "false"
|
||||
|
@ -211,6 +213,7 @@ ingress:
|
|||
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
|
||||
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
|
||||
path: /
|
||||
pathType: Prefix
|
||||
hosts:
|
||||
- minio-example.local
|
||||
tls: []
|
||||
|
@ -241,9 +244,11 @@ consoleService:
|
|||
consoleIngress:
|
||||
enabled: false
|
||||
ingressClassName: ~
|
||||
labels: {}
|
||||
labels:
|
||||
{}
|
||||
# node-role.kubernetes.io/ingress: platform
|
||||
annotations: {}
|
||||
annotations:
|
||||
{}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
# kubernetes.io/ingress.allow-http: "false"
|
||||
|
@ -252,6 +257,7 @@ consoleIngress:
|
|||
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
|
||||
# nginx.ingress.kubernetes.io/whitelist-source-range: 0.0.0.0/0
|
||||
path: /
|
||||
pathType: Prefix
|
||||
hosts:
|
||||
- console.minio-example.local
|
||||
tls: []
|
||||
|
@ -391,7 +397,8 @@ makeUserJob:
|
|||
|
||||
## List of service accounts to be created after minio install
|
||||
##
|
||||
svcaccts: []
|
||||
svcaccts:
|
||||
[]
|
||||
## accessKey, secretKey and parent user to be assigned to the service accounts
|
||||
## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts
|
||||
# - accessKey: console-svcacct
|
||||
|
@ -430,7 +437,8 @@ makeServiceAccountJob:
|
|||
|
||||
## List of buckets to be created after minio install
|
||||
##
|
||||
buckets: []
|
||||
buckets:
|
||||
[]
|
||||
# # Name of the bucket
|
||||
# - name: bucket1
|
||||
# # Policy to be set on the
|
||||
|
@ -479,13 +487,15 @@ customCommandJob:
|
|||
requests:
|
||||
memory: 128Mi
|
||||
## Additional volumes to add to the post-job.
|
||||
extraVolumes: []
|
||||
extraVolumes:
|
||||
[]
|
||||
# - name: extra-policies
|
||||
# configMap:
|
||||
# name: my-extra-policies-cm
|
||||
## Additional volumeMounts to add to the custom commands container when
|
||||
## running the post-job.
|
||||
extraVolumeMounts: []
|
||||
extraVolumeMounts:
|
||||
[]
|
||||
# - name: extra-policies
|
||||
# mountPath: /mnt/extras/
|
||||
# Command to run after the main command on exit
|
||||
|
@ -542,10 +552,35 @@ networkPolicy:
|
|||
# Specifies whether the policies created will be standard Network Policies (flavor: kubernetes)
|
||||
# or Cilium Network Policies (flavor: cilium)
|
||||
flavor: kubernetes
|
||||
# allows external access to the minio api
|
||||
allowExternal: true
|
||||
## @params networkPolicy.egress configuration of the egress traffic
|
||||
egress:
|
||||
## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
|
||||
## created allowing minio to connect to external data sources from kubernetes cluster.
|
||||
##
|
||||
enabled: false
|
||||
## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
|
||||
## Add ports to the egress by specifying - port: <port number>
|
||||
## E.X.
|
||||
## - port: 80
|
||||
## - port: 443
|
||||
## - port: 53
|
||||
## protocol: UDP
|
||||
##
|
||||
ports: []
|
||||
## @param networkPolicy.egress.to Allow egress traffic to specific destinations
|
||||
## Add destinations to the egress by specifying - ipBlock: <CIDR>
|
||||
## E.X.
|
||||
## to:
|
||||
## - namespaceSelector:
|
||||
## matchExpressions:
|
||||
## - {key: role, operator: In, values: [minio]}
|
||||
##
|
||||
to: []
|
||||
# only when using flavor: cilium
|
||||
egressEntities:
|
||||
- kube-apiserver
|
||||
- kube-apiserver
|
||||
|
||||
## PodDisruptionBudget settings
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
|
||||
|
@ -573,7 +608,8 @@ metrics:
|
|||
# for node metrics
|
||||
relabelConfigs: {}
|
||||
# for cluster metrics
|
||||
relabelConfigsCluster: {}
|
||||
relabelConfigsCluster:
|
||||
{}
|
||||
# metricRelabelings:
|
||||
# - regex: (server|pod)
|
||||
# action: labeldrop
|
||||
|
|
|
@@ -27,7 +27,6 @@ import (
    "io"

    jsoniter "github.com/json-iterator/go"
-   "github.com/minio/minio/internal/fips"
    "github.com/minio/minio/internal/kms"
    "github.com/secure-io/sio-go"
    "github.com/secure-io/sio-go/sioutil"

@@ -64,7 +63,7 @@ func DecryptBytes(k *kms.KMS, ciphertext []byte, context kms.Context) ([]byte, e
// ciphertext.
func Encrypt(k *kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) {
    algorithm := sio.AES_256_GCM
-   if !fips.Enabled && !sioutil.NativeAES() {
+   if !sioutil.NativeAES() {
        algorithm = sio.ChaCha20Poly1305
    }

@@ -145,9 +144,6 @@ func Decrypt(k *kms.KMS, ciphertext io.Reader, associatedData kms.Context) (io.R
    if err := json.Unmarshal(metadataBuffer, &metadata); err != nil {
        return nil, err
    }
-   if fips.Enabled && metadata.Algorithm != sio.AES_256_GCM {
-       return nil, fmt.Errorf("config: unsupported encryption algorithm: %q is not supported in FIPS mode", metadata.Algorithm)
-   }

    key, err := k.Decrypt(context.TODO(), &kms.DecryptRequest{
        Name: metadata.KeyID,
@ -24,7 +24,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
|
@ -165,8 +165,8 @@ func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) {
|
|||
MinVersion: tls.VersionTLS12,
|
||||
NextProtos: []string{"http/1.1", "h2"},
|
||||
ClientSessionCache: tls.NewLRUClientSessionCache(64),
|
||||
CipherSuites: fips.TLSCiphersBackwardCompatible(),
|
||||
CurvePreferences: fips.TLSCurveIDs(),
|
||||
CipherSuites: crypto.TLSCiphersBackwardCompatible(),
|
||||
CurvePreferences: crypto.TLSCurveIDs(),
|
||||
}
|
||||
// This is only to support client side certificate authentication
|
||||
// https://coreos.com/etcd/docs/latest/op-guide/security.html
|
||||
|
|
|
@ -26,7 +26,7 @@ import (
|
|||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/pkg/v3/ldap"
|
||||
)
|
||||
|
||||
|
@ -197,7 +197,7 @@ func Lookup(s config.Config, rootCAs *x509.CertPool) (l Config, err error) {
|
|||
MinVersion: tls.VersionTLS12,
|
||||
NextProtos: []string{"h2", "http/1.1"},
|
||||
ClientSessionCache: tls.NewLRUClientSessionCache(100),
|
||||
CipherSuites: fips.TLSCiphersBackwardCompatible(), // Contains RSA key exchange
|
||||
CipherSuites: crypto.TLSCiphersBackwardCompatible(), // Contains RSA key exchange
|
||||
RootCAs: rootCAs,
|
||||
},
|
||||
}
|
||||
|
|
|
@ -11,9 +11,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !fips
|
||||
// +build !fips
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
|
@ -22,7 +19,7 @@ import (
|
|||
"github.com/golang-jwt/jwt/v4"
|
||||
|
||||
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
|
||||
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
|
||||
_ "golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
||||
// Specific instances for EC256 and company
|
||||
|
|
|
@ -12,9 +12,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !fips
|
||||
// +build !fips
|
||||
|
||||
package openid
|
||||
|
||||
import (
|
||||
|
@ -23,7 +20,7 @@ import (
|
|||
"github.com/golang-jwt/jwt/v4"
|
||||
|
||||
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
|
||||
_ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation
|
||||
_ "golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
||||
// Specific instances for RS256 and company
|
||||
|
|
|
@ -15,22 +15,7 @@
|
|||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package fips provides functionality to configure cryptographic
|
||||
// implementations compliant with FIPS 140.
|
||||
//
|
||||
// FIPS 140 [1] is a US standard for data processing that specifies
|
||||
// requirements for cryptographic modules. Software that is "FIPS 140
|
||||
// compliant" must use approved cryptographic primitives only and that
|
||||
// are implemented by a FIPS 140 certified cryptographic module.
|
||||
//
|
||||
// So, FIPS 140 requires that a certified implementation of e.g. AES
|
||||
// is used to implement more high-level cryptographic protocols.
|
||||
// It does not require any specific security criteria for those
|
||||
// high-level protocols. FIPS 140 focuses only on the implementation
|
||||
// and usage of the most low-level cryptographic building blocks.
|
||||
//
|
||||
// [1]: https://en.wikipedia.org/wiki/FIPS_140
|
||||
package fips
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
|
@ -38,40 +23,13 @@ import (
|
|||
"github.com/minio/sio"
|
||||
)
|
||||
|
||||
// Enabled indicates whether cryptographic primitives,
|
||||
// like AES or SHA-256, are implemented using a FIPS 140
|
||||
// certified module.
|
||||
//
|
||||
// If FIPS-140 is enabled no non-NIST/FIPS approved
|
||||
// primitives must be used.
|
||||
const Enabled = enabled
|
||||
|
||||
// DARECiphers returns a list of supported cipher suites
|
||||
// for the DARE object encryption.
|
||||
func DARECiphers() []byte {
|
||||
if Enabled {
|
||||
return []byte{sio.AES_256_GCM}
|
||||
}
|
||||
return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305}
|
||||
}
|
||||
func DARECiphers() []byte { return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305} }
|
||||
|
||||
// TLSCiphers returns a list of supported TLS transport
|
||||
// cipher suite IDs.
|
||||
//
|
||||
// The list contains only ciphers that use AES-GCM or
|
||||
// (non-FIPS) CHACHA20-POLY1305 and ellitpic curve key
|
||||
// exchange.
|
||||
func TLSCiphers() []uint16 {
|
||||
if Enabled {
|
||||
return []uint16{
|
||||
tls.TLS_AES_128_GCM_SHA256, // TLS 1.3
|
||||
tls.TLS_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
}
|
||||
}
|
||||
return []uint16{
|
||||
tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3
|
||||
tls.TLS_AES_128_GCM_SHA256,
|
||||
|
@ -92,24 +50,6 @@ func TLSCiphers() []uint16 {
|
|||
// ciphers for backward compatibility. In particular, AES-CBC
|
||||
// and non-ECDHE ciphers.
|
||||
func TLSCiphersBackwardCompatible() []uint16 {
|
||||
if Enabled {
|
||||
return []uint16{
|
||||
tls.TLS_AES_128_GCM_SHA256, // TLS 1.3
|
||||
tls.TLS_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 ECDHE GCM
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // TLS 1.2 ECDHE CBC
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 non-ECDHE
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
}
|
||||
}
|
||||
return []uint16{
|
||||
tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3
|
||||
tls.TLS_AES_128_GCM_SHA256,
|
||||
|
@@ -134,10 +74,5 @@ func TLSCiphersBackwardCompatible() []uint16 {
// TLSCurveIDs returns a list of supported elliptic curve IDs
// in preference order.
func TLSCurveIDs() []tls.CurveID {
-   var curves []tls.CurveID
-   if !Enabled {
-       curves = append(curves, tls.X25519) // Only enable X25519 in non-FIPS mode
-   }
-   curves = append(curves, tls.CurveP256, tls.CurveP384, tls.CurveP521)
-   return curves
+   return []tls.CurveID{tls.CurveP256, tls.X25519, tls.CurveP384, tls.CurveP521}
}
@ -27,7 +27,6 @@ import (
|
|||
"io"
|
||||
"path"
|
||||
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/sio"
|
||||
|
@ -98,7 +97,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
|
|||
mac.Write([]byte(SealAlgorithm))
|
||||
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
|
||||
mac.Sum(sealingKey[:0])
|
||||
if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:], CipherSuites: fips.DARECiphers()}); n != 64 || err != nil {
|
||||
if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key"))
|
||||
}
|
||||
sealedKey := SealedKey{
|
||||
|
@ -123,12 +122,12 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
|
|||
mac.Write([]byte(domain))
|
||||
mac.Write([]byte(SealAlgorithm))
|
||||
mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object'
|
||||
unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}
|
||||
unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil)}
|
||||
case InsecureSealAlgorithm:
|
||||
sha := sha256.New()
|
||||
sha.Write(extKey)
|
||||
sha.Write(sealedKey.IV[:])
|
||||
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.DARECiphers()}
|
||||
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)}
|
||||
}
|
||||
|
||||
if out, err := sio.DecryptBuffer(key[:0], sealedKey.Key[:], unsealConfig); len(out) != 32 || err != nil {
|
||||
|
@ -159,7 +158,7 @@ func (key ObjectKey) SealETag(etag []byte) []byte {
|
|||
var buffer bytes.Buffer
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte("SSE-etag"))
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil {
|
||||
if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key"))
|
||||
}
|
||||
return buffer.Bytes()
|
||||
|
@ -175,5 +174,5 @@ func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) {
|
|||
}
|
||||
mac := hmac.New(sha256.New, key[:])
|
||||
mac.Write([]byte("SSE-etag"))
|
||||
return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()})
|
||||
return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil)})
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/sio"
|
||||
|
@ -101,7 +100,7 @@ func unsealObjectKey(clientKey []byte, metadata map[string]string, bucket, objec
|
|||
// EncryptSinglePart encrypts an io.Reader which must be the
|
||||
// body of a single-part PUT request.
|
||||
func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader {
|
||||
r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:], CipherSuites: fips.DARECiphers()})
|
||||
r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:]})
|
||||
if err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("Unable to encrypt io.Reader using object key"))
|
||||
}
|
||||
|
@ -123,7 +122,7 @@ func DecryptSinglePart(w io.Writer, offset, length int64, key ObjectKey) io.Writ
|
|||
const PayloadSize = 1 << 16 // DARE 2.0
|
||||
w = ioutil.LimitedWriter(w, offset%PayloadSize, length)
|
||||
|
||||
decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:], CipherSuites: fips.DARECiphers()})
|
||||
decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:]})
|
||||
if err != nil {
|
||||
logger.CriticalIf(context.Background(), errors.New("Unable to decrypt io.Writer using object key"))
|
||||
}
|
||||
|
|
|
@ -381,7 +381,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int)
|
|||
lockNotFound, lockRefreshed := 0, 0
|
||||
done := false
|
||||
|
||||
for i := 0; i < len(restClnts); i++ {
|
||||
for range len(restClnts) {
|
||||
select {
|
||||
case refreshResult := <-ch:
|
||||
if refreshResult.offline {
|
||||
|
|
|
@ -117,7 +117,6 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/internal/fips"
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/sio"
|
||||
|
@ -346,8 +345,7 @@ func Decrypt(key []byte, etag ETag) (ETag, error) {
|
|||
|
||||
plaintext := make([]byte, 0, 16)
|
||||
etag, err := sio.DecryptBuffer(plaintext, etag, sio.Config{
|
||||
Key: decryptionKey,
|
||||
CipherSuites: fips.DARECiphers(),
|
||||
Key: decryptionKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -357,7 +357,7 @@ func (list *TargetList) startSendWorkers(workerCount int) {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for i := 0; i < workerCount; i++ {
|
||||
for range workerCount {
|
||||
wk.Take()
|
||||
go func() {
|
||||
defer wk.Give()
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//go:build fips && linux && amd64
|
||||
// +build fips,linux,amd64
|
||||
|
||||
package fips
|
||||
|
||||
import _ "crypto/tls/fipsonly"
|
||||
|
||||
const enabled = true
|
|
@ -1,23 +0,0 @@
|
|||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//go:build !fips
|
||||
// +build !fips
|
||||
|
||||
package fips
|
||||
|
||||
const enabled = false
|
|
@ -1041,7 +1041,7 @@ func (c *Connection) readStream(ctx context.Context, conn net.Conn, cancel conte
|
|||
// Handle merged messages.
|
||||
messages := int(m.Seq)
|
||||
c.inMessages.Add(int64(messages))
|
||||
for i := 0; i < messages; i++ {
|
||||
for range messages {
|
||||
if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected {
|
||||
return
|
||||
}
|
||||
|
|
|
@ -143,7 +143,7 @@ func (t *TestGrid) WaitAllConnect(ctx context.Context) {
|
|||
}
|
||||
|
||||
func getHosts(n int) (hosts []string, listeners []net.Listener, err error) {
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {
|
||||
|
|
|
@ -574,7 +574,7 @@ func (m *muxClient) ack(seq uint32) {
|
|||
return
|
||||
}
|
||||
available := cap(m.outBlock)
|
||||
for i := 0; i < available; i++ {
|
||||
for range available {
|
||||
m.outBlock <- struct{}{}
|
||||
}
|
||||
m.acked = true
|
||||
|
|
|
@ -130,7 +130,7 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea
|
|||
// Fill outbound block.
|
||||
// Each token represents a message that can be sent to the client without blocking.
|
||||
// The client will refill the tokens as they confirm delivery of the messages.
|
||||
for i := 0; i < outboundCap; i++ {
|
||||
for range outboundCap {
|
||||
m.outBlock <- struct{}{}
|
||||
}
|
||||
|
||||
|
|
|
@@ -230,6 +230,11 @@ func (c ChecksumType) FullObjectRequested() bool {
    return c&(ChecksumFullObject) == ChecksumFullObject || c.Is(ChecksumCRC64NVME)
}

+// IsMultipartComposite returns true if the checksum is multipart and full object was not requested.
+func (c ChecksumType) IsMultipartComposite() bool {
+   return c.Is(ChecksumMultipart) && !c.FullObjectRequested()
+}
+
// ObjType returns a string to return as x-amz-checksum-type.
func (c ChecksumType) ObjType() string {
    if c.FullObjectRequested() {
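The distinction matters in the CopyObject change earlier: a composite (per-part) checksum cannot be carried onto a single-part destination. An illustrative check, built only from the flag names visible in this file:

```go
// composite: a per-part multipart checksum (e.g. from CompleteMultipartUpload).
composite := hash.ChecksumCRC32C | hash.ChecksumMultipart
// fullObj: the caller asked for a full-object checksum, so it survives a copy as-is.
fullObj := hash.ChecksumCRC32C | hash.ChecksumMultipart | hash.ChecksumFullObject

fmt.Println(composite.IsMultipartComposite())               // true
fmt.Println(fullObj.IsMultipartComposite())                 // false
fmt.Println(hash.ChecksumCRC64NVME.IsMultipartComposite())  // false: CRC-64NVME is always full-object
```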
@ -269,7 +274,7 @@ func (c ChecksumType) Trailing() bool {
|
|||
return c.Is(ChecksumTrailing)
|
||||
}
|
||||
|
||||
// NewChecksumFromData returns a new checksum from specified algorithm and base64 encoded value.
|
||||
// NewChecksumFromData returns a new Checksum, using specified algorithm type on data.
|
||||
func NewChecksumFromData(t ChecksumType, data []byte) *Checksum {
|
||||
if !t.IsSet() {
|
||||
return nil
|
||||
|
@ -311,8 +316,6 @@ func ReadCheckSums(b []byte, part int) (cs map[string]string, isMP bool) {
|
|||
}
|
||||
if !typ.FullObjectRequested() {
|
||||
cs = fmt.Sprintf("%s-%d", cs, t)
|
||||
} else if part <= 0 {
|
||||
res[xhttp.AmzChecksumType] = xhttp.AmzChecksumTypeFullObject
|
||||
}
|
||||
b = b[n:]
|
||||
if part > 0 {
|
||||
|
@ -337,6 +340,13 @@ func ReadCheckSums(b []byte, part int) (cs map[string]string, isMP bool) {
|
|||
}
|
||||
if cs != "" {
|
||||
res[typ.String()] = cs
|
||||
res[xhttp.AmzChecksumType] = typ.ObjType()
|
||||
if !typ.Is(ChecksumMultipart) {
|
||||
// Single part PUTs are always FULL_OBJECT checksum
|
||||
// Refer https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
|
||||
// **For PutObject uploads, the checksum type is always FULL_OBJECT.**
|
||||
res[xhttp.AmzChecksumType] = ChecksumFullObject.ObjType()
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(res) == 0 {
|
||||
|
@ -468,6 +478,65 @@ func (c *Checksum) AppendTo(b []byte, parts []byte) []byte {
|
|||
return b
|
||||
}
|
||||
|
||||
// ChecksumFromBytes reconstructs a Checksum struct from the serialized bytes created in AppendTo()
|
||||
// Returns nil if the bytes are invalid or empty.
|
||||
// AppendTo() can append a serialized Checksum to another already-serialized Checksum,
|
||||
// however, in practice, we only use one at a time.
|
||||
// ChecksumFromBytes only returns the first one and no part checksums.
|
||||
func ChecksumFromBytes(b []byte) *Checksum {
|
||||
if len(b) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read checksum type
|
||||
t, n := binary.Uvarint(b)
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
typ := ChecksumType(t)
|
||||
length := typ.RawByteLen()
|
||||
if length == 0 || len(b) < length {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read raw checksum bytes
|
||||
raw := make([]byte, length)
|
||||
copy(raw, b[:length])
|
||||
b = b[length:]
|
||||
|
||||
c := &Checksum{
|
||||
Type: typ,
|
||||
Raw: raw,
|
||||
Encoded: base64.StdEncoding.EncodeToString(raw),
|
||||
}
|
||||
|
||||
// Handle multipart checksums
|
||||
if typ.Is(ChecksumMultipart) {
|
||||
parts, n := binary.Uvarint(b)
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
c.WantParts = int(parts)
|
||||
|
||||
if typ.Is(ChecksumIncludesMultipart) {
|
||||
wantLen := int(parts) * length
|
||||
if len(b) < wantLen {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !c.Valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Valid returns whether checksum is valid.
|
||||
func (c Checksum) Valid() bool {
|
||||
if c.Type == ChecksumInvalid {
|
||||
|
@@ -506,12 +575,26 @@ func (c Checksum) Matches(content []byte, parts int) error {
    return nil
}

-// AsMap returns the
+// AsMap returns the checksum as a map[string]string.
func (c *Checksum) AsMap() map[string]string {
    if c == nil || !c.Valid() {
        return nil
    }
-   return map[string]string{c.Type.String(): c.Encoded}
+   return map[string]string{
+       c.Type.String():       c.Encoded,
+       xhttp.AmzChecksumType: c.Type.ObjType(),
+   }
}

+// Equal returns whether two checksum structs are equal in all their fields.
+func (c *Checksum) Equal(s *Checksum) bool {
+   if c == nil || s == nil {
+       return c == s
+   }
+   return c.Type == s.Type &&
+       c.Encoded == s.Encoded &&
+       bytes.Equal(c.Raw, s.Raw) &&
+       c.WantParts == s.WantParts
+}
+
// TransferChecksumHeader will transfer any checksum value that has been checked.
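The new `AppendTo`/`ChecksumFromBytes` pair is symmetric, which is exactly what the tests added below exercise. A hedged round-trip sketch (a fragment; error handling and imports trimmed):

```go
// Serialize a checksum, rebuild it from the bytes, and verify nothing was lost.
orig := hash.NewChecksumFromData(hash.ChecksumCRC32, []byte("payload"))
buf := orig.AppendTo(nil, nil)     // wire form, as stored in object metadata
got := hash.ChecksumFromBytes(buf) // reverse of AppendTo for the first checksum
if got == nil || !got.Equal(orig) {
	log.Fatal("checksum did not survive the round trip")
}
```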
@ -0,0 +1,162 @@
|
|||
// Copyright (c) 2015-2025 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package hash
|
||||
|
||||
import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestChecksumAddToHeader tests that adding and retrieving a checksum on a header works
|
||||
func TestChecksumAddToHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
checksum ChecksumType
|
||||
fullobj bool
|
||||
}{
|
||||
{"CRC32-composite", ChecksumCRC32, false},
|
||||
{"CRC32-full-object", ChecksumCRC32, true},
|
||||
{"CRC32C-composite", ChecksumCRC32C, false},
|
||||
{"CRC32C-full-object", ChecksumCRC32C, true},
|
||||
{"CRC64NVME-full-object", ChecksumCRC64NVME, false}, // testing with false, because it always is full object.
|
||||
{"ChecksumSHA1-composite", ChecksumSHA1, false},
|
||||
{"ChecksumSHA256-composite", ChecksumSHA256, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
myData := []byte("this-is-a-checksum-data-test")
|
||||
chksm := NewChecksumFromData(tt.checksum, myData)
|
||||
if tt.fullobj {
|
||||
chksm.Type |= ChecksumFullObject
|
||||
}
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
AddChecksumHeader(w, chksm.AsMap())
|
||||
gotChksm, err := GetContentChecksum(w.Result().Header)
|
||||
if err != nil {
|
||||
t.Fatalf("GetContentChecksum failed: %v", err)
|
||||
}
|
||||
|
||||
// In the CRC64NVM case, it is always full object, so add the flag for easier equality comparison
|
||||
if chksm.Type.Base().Is(ChecksumCRC64NVME) {
|
||||
chksm.Type |= ChecksumFullObject
|
||||
}
|
||||
if !chksm.Equal(gotChksm) {
|
||||
t.Fatalf("Checksum mismatch: expected %+v, got %+v", chksm, gotChksm)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestChecksumSerializeDeserialize checks AppendTo can be reversed by ChecksumFromBytes
|
||||
func TestChecksumSerializeDeserialize(t *testing.T) {
|
||||
myData := []byte("this-is-a-checksum-data-test")
|
||||
chksm := NewChecksumFromData(ChecksumCRC32, myData)
|
||||
if chksm == nil {
|
||||
t.Fatal("NewChecksumFromData returned nil")
|
||||
}
|
||||
// Serialize the checksum to bytes
|
||||
b := chksm.AppendTo(nil, nil)
|
||||
if b == nil {
|
||||
t.Fatal("AppendTo returned nil")
|
||||
}
|
||||
|
||||
// Deserialize the checksum from bytes
|
||||
chksmOut := ChecksumFromBytes(b)
|
||||
if chksmOut == nil {
|
||||
t.Fatal("ChecksumFromBytes returned nil")
|
||||
}
|
||||
|
||||
// Assert new checksum matches the content
|
||||
matchError := chksmOut.Matches(myData, 0)
|
||||
if matchError != nil {
|
||||
t.Fatalf("Checksum mismatch on chksmOut: %v", matchError)
|
||||
}
|
||||
|
||||
// Assert they are exactly equal
|
||||
if !chksmOut.Equal(chksm) {
|
||||
t.Fatalf("Checksum mismatch: expected %+v, got %+v", chksm, chksmOut)
|
||||
}
|
||||
}
|

// TestChecksumSerializeDeserializeMultiPart checks AppendTo can be reversed by ChecksumFromBytes
// for multipart checksum
func TestChecksumSerializeDeserializeMultiPart(t *testing.T) {
	// Create dummy data that we'll split into 3 parts
	dummyData := []byte("The quick brown fox jumps over the lazy dog. " +
		"Pack my box with five dozen brown eggs. " +
		"Have another go it will all make sense in the end!")

	// Split data into 3 parts
	partSize := len(dummyData) / 3
	part1Data := dummyData[0:partSize]
	part2Data := dummyData[partSize : 2*partSize]
	part3Data := dummyData[2*partSize:]

	// Calculate CRC32C checksum for each part using NewChecksumFromData
	checksumType := ChecksumCRC32C

	part1Checksum := NewChecksumFromData(checksumType, part1Data)
	part2Checksum := NewChecksumFromData(checksumType, part2Data)
	part3Checksum := NewChecksumFromData(checksumType, part3Data)

	// Combine the raw checksums (this is what happens in CompleteMultipartUpload)
	var checksumCombined []byte
	checksumCombined = append(checksumCombined, part1Checksum.Raw...)
	checksumCombined = append(checksumCombined, part2Checksum.Raw...)
	checksumCombined = append(checksumCombined, part3Checksum.Raw...)

	// Create the final checksum (checksum of the combined checksums)
	// Add BOTH the multipart flag AND the includes-multipart flag
	finalChecksumType := checksumType | ChecksumMultipart | ChecksumIncludesMultipart
	finalChecksum := NewChecksumFromData(finalChecksumType, checksumCombined)

	// Set WantParts to indicate 3 parts
	finalChecksum.WantParts = 3

	// Test AppendTo serialization
	var serialized []byte
	serialized = finalChecksum.AppendTo(serialized, checksumCombined)

	// Use ChecksumFromBytes to deserialize the final checksum
	chksmOut := ChecksumFromBytes(serialized)
	if chksmOut == nil {
		t.Fatal("ChecksumFromBytes returned nil")
	}

	// Assert they are exactly equal
	if !chksmOut.Equal(finalChecksum) {
		t.Fatalf("Checksum mismatch: expected %+v, got %+v", finalChecksum, chksmOut)
	}

	// Serialize what we got from ChecksumFromBytes
	serializedOut := chksmOut.AppendTo(nil, checksumCombined)

	// Read part checksums from serializedOut
	readParts := ReadPartCheckSums(serializedOut)
	expectedChecksums := []string{
		part1Checksum.Encoded,
		part2Checksum.Encoded,
		part3Checksum.Encoded,
	}
	for i, expected := range expectedChecksums {
		if got := readParts[i][ChecksumCRC32C.String()]; got != expected {
			t.Fatalf("want part%dChecksum.Encoded %s, got %s", i+1, expected, got)
		}
	}
}
@@ -51,11 +51,18 @@ type Reader struct {
 	checksum      etag.ETag
 	contentSHA256 []byte

-	// Content checksum
+	// Client-provided content checksum
 	contentHash   Checksum
 	contentHasher hash.Hash
 	disableMD5    bool

+	// Server side computed checksum. In some cases, like CopyObject, a new checksum
+	// needs to be computed and saved on the destination object, but the client
+	// does not provide it. Not calculated if client-side contentHash is set.
+	ServerSideChecksumType   ChecksumType
+	ServerSideHasher         hash.Hash
+	ServerSideChecksumResult *Checksum
+
 	trailer http.Header

 	sha256 hash.Hash
@@ -247,6 +254,16 @@ func (r *Reader) AddNonTrailingChecksum(cs *Checksum, ignoreValue bool) error {
 	return nil
 }

+// AddServerSideChecksumHasher adds a new hasher for computing the server-side checksum.
+func (r *Reader) AddServerSideChecksumHasher(t ChecksumType) {
+	h := t.Hasher()
+	if h == nil {
+		return
+	}
+	r.ServerSideHasher = h
+	r.ServerSideChecksumType = t
+}
+
 func (r *Reader) Read(p []byte) (int, error) {
 	n, err := r.src.Read(p)
 	r.bytesRead += int64(n)
@@ -255,6 +272,8 @@ func (r *Reader) Read(p []byte) (int, error) {
 	}
 	if r.contentHasher != nil {
 		r.contentHasher.Write(p[:n])
+	} else if r.ServerSideHasher != nil {
+		r.ServerSideHasher.Write(p[:n])
 	}

 	if err == io.EOF { // Verify content SHA256, if set.
@@ -293,6 +312,9 @@ func (r *Reader) Read(p []byte) (int, error) {
 				}
 				return n, err
 			}
+		} else if r.ServerSideHasher != nil {
+			sum := r.ServerSideHasher.Sum(nil)
+			r.ServerSideChecksumResult = NewChecksumWithType(r.ServerSideChecksumType, base64.StdEncoding.EncodeToString(sum))
 		}
 	}
 	if err != nil && err != io.EOF {
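Taken together, the new fields and AddServerSideChecksumHasher let a caller ask the reader to compute a checksum while data streams through it, for cases like CopyObject where the client supplies none. A rough usage sketch follows; copyWithServerSideChecksum is a hypothetical helper (not part of this diff) and assumes an already-constructed *Reader from this package with no client-provided checksum:

// copyWithServerSideChecksum is a hypothetical helper sketching the new API:
// it asks the reader to hash the stream server-side and returns the result.
func copyWithServerSideChecksum(dst io.Writer, r *Reader) (*Checksum, error) {
	// Start computing a CRC32C checksum as bytes flow through Read.
	r.AddServerSideChecksumHasher(ChecksumCRC32C)

	// Drain the reader; once Read sees io.EOF, the computed checksum is published.
	if _, err := io.Copy(dst, r); err != nil {
		return nil, err
	}
	return r.ServerSideChecksumResult, nil
}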
@@ -75,7 +75,10 @@ var matchingFuncNames = [...]string{
 var (
 	quietFlag, jsonFlag, anonFlag bool
 	// Custom function to format error
-	errorFmtFunc func(string, error, bool) string
+	// can be registered by RegisterError
+	errorFmtFunc = func(introMsg string, err error, jsonFlag bool) string {
+		return fmt.Sprintf("msg: %s\n err:%s", introMsg, err)
+	}
 )

 // EnableQuiet - turns quiet option on.
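The hunk above gives errorFmtFunc a default implementation instead of leaving it nil until a formatter is registered, so call sites no longer need a nil guard. A small self-contained sketch of the same pattern (package main; registerError is an illustrative stand-in for the package's RegisterError):

package main

import (
	"errors"
	"fmt"
)

// Default formatter; a custom one can be swapped in later.
var errorFmtFunc = func(introMsg string, err error, jsonFlag bool) string {
	return fmt.Sprintf("msg: %s\n err:%s", introMsg, err)
}

// registerError replaces the default formatter, mirroring RegisterError.
func registerError(f func(string, error, bool) string) { errorFmtFunc = f }

func main() {
	// Works out of the box thanks to the non-nil default.
	fmt.Println(errorFmtFunc("startup failed", errors.New("disk offline"), false))

	// Callers that want a different shape register their own formatter.
	registerError(func(msg string, err error, _ bool) string {
		return fmt.Sprintf("{\"msg\":%q,\"err\":%q}", msg, err.Error())
	})
	fmt.Println(errorFmtFunc("startup failed", errors.New("disk offline"), true))
}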
@@ -230,7 +230,7 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error {
 	}()

 	// Start parsers
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+	for range runtime.GOMAXPROCS(0) {
 		go func() {
 			for in := range r.input {
 				if len(in.input) == 0 {
@@ -173,7 +173,7 @@ func (r *PReader) startReaders() {
 	}()

 	// Start parsers
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+	for range runtime.GOMAXPROCS(0) {
 		go func() {
 			for in := range r.input {
 				if len(in.input) == 0 {
@@ -332,7 +332,7 @@ func (d *Decoder) u4() rune {
 	// logic taken from:
 	// github.com/buger/jsonparser/blob/master/escape.go#L20
 	var h [4]int
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		c := d.next()
 		switch {
 		case c >= '0' && c <= '9':
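The loop rewrites in the last three hunks switch to Go 1.22's range-over-integer form, which iterates exactly as many times as the counted loops they replace. A minimal stand-alone illustration:

package main

import "fmt"

func main() {
	// Classic counted loop.
	for i := 0; i < 4; i++ {
		fmt.Println("classic", i)
	}
	// Go 1.22+ equivalent; drop the variable entirely when only the count
	// matters, as in `for range runtime.GOMAXPROCS(0)`.
	for i := range 4 {
		fmt.Println("range", i)
	}
}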