mirror of https://github.com/minio/minio.git

Compare commits: 8 commits, `RELEASE.20...` to `master`

| SHA1 |
|---|
| `58659f26f4` |
| `3a0cc6c86e` |
| `10b0a234d2` |
| `18f97e70b1` |
| `52eee5a2f1` |
| `c6d3aac5c4` |
| `fa18589d1c` |
| `05e569960a` |
@@ -1,8 +1,14 @@
 FROM minio/minio:latest

+ARG TARGETARCH
+ARG RELEASE

 RUN chmod -R 777 /usr/bin

-COPY ./minio /usr/bin/minio
+COPY ./minio-${TARGETARCH}.${RELEASE} /usr/bin/minio
+COPY ./minio-${TARGETARCH}.${RELEASE}.minisig /usr/bin/minio.minisig
+COPY ./minio-${TARGETARCH}.${RELEASE}.sha256sum /usr/bin/minio.sha256sum

 COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh

 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
@@ -77,7 +77,7 @@ mc admin info local
 ```

 See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool.
-For application developers, see <https://docs.min.io/community/minio-object-store/developers/minio-drivers.html> to view MinIO SDKs for supported languages.
+For application developers, see <https://docs.min.io/enterprise/aistor-object-store/developers/sdk/> to view MinIO SDKs for supported languages.

 > [!NOTE]
 > Production environments using compiled-from-source MinIO binaries do so at their own risk.
@@ -102,7 +102,7 @@ docker run -p 9000:9000 -p 9001:9001 myminio:minio server /tmp/minio --console-a
 ```

 Complete documentation for building Docker containers, managing custom images, or loading images into orchestration platforms is out of scope for this documentation.
-You can modify the `Dockerfile` and `dockerscripts/socker-entrypoint.sh` as-needed to reflect your specific image requirements.
+You can modify the `Dockerfile` and `dockerscripts/docker-entrypoint.sh` as-needed to reflect your specific image requirements.

 See the [MinIO Container](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html#deploy-minio-container) documentation for more guidance on running MinIO within a Container image.
@@ -147,7 +147,7 @@ Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-o
 - [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html)
 - [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html)
 - [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html)
-- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html)
+- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/enterprise/aistor-object-store/developers/sdk/go/)

 ## Contribute to MinIO Project
@@ -889,6 +889,12 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err
 }

 func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
+	// Don't write a response if one has already been written.
+	// Fixes https://github.com/minio/minio/issues/21633
+	if headersAlreadyWritten(w) {
+		return
+	}
+
 	if statusCode == 0 {
 		statusCode = 200
 	}
@@ -1015,3 +1021,45 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er
 	encodedErrorResponse := encodeResponseJSON(errorResponse)
 	writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
 }
+
+type unwrapper interface {
+	Unwrap() http.ResponseWriter
+}
+
+// headersAlreadyWritten returns true if the headers have already been written
+// to this response writer. It will unwrap the ResponseWriter if possible to try
+// and find a trackingResponseWriter.
+func headersAlreadyWritten(w http.ResponseWriter) bool {
+	for {
+		if trw, ok := w.(*trackingResponseWriter); ok {
+			return trw.headerWritten
+		} else if uw, ok := w.(unwrapper); ok {
+			w = uw.Unwrap()
+		} else {
+			return false
+		}
+	}
+}
+
+// trackingResponseWriter wraps a ResponseWriter and notes when WriteHeader has
+// been called. This allows high level request handlers to check if something
+// has already sent the header.
+type trackingResponseWriter struct {
+	http.ResponseWriter
+	headerWritten bool
+}
+
+func (w *trackingResponseWriter) WriteHeader(statusCode int) {
+	if !w.headerWritten {
+		w.headerWritten = true
+		w.ResponseWriter.WriteHeader(statusCode)
+	}
+}
+
+func (w *trackingResponseWriter) Write(b []byte) (int, error) {
+	return w.ResponseWriter.Write(b)
+}
+
+func (w *trackingResponseWriter) Unwrap() http.ResponseWriter {
+	return w.ResponseWriter
+}
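Taken together, the wrapper and the `headersAlreadyWritten` check make `writeResponse` effectively idempotent. Below is a minimal standalone sketch of the duplicate-`WriteHeader` suppression; the names mirror the diff above, but this is an illustration, not the MinIO source itself:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// trackingResponseWriter mirrors the wrapper added above: it records whether
// WriteHeader has fired and drops any later, duplicate call.
type trackingResponseWriter struct {
	http.ResponseWriter
	headerWritten bool
}

func (w *trackingResponseWriter) WriteHeader(statusCode int) {
	if !w.headerWritten {
		w.headerWritten = true
		w.ResponseWriter.WriteHeader(statusCode)
	}
}

func main() {
	rec := httptest.NewRecorder()
	trw := &trackingResponseWriter{ResponseWriter: rec}

	trw.WriteHeader(http.StatusForbidden) // first status wins
	trw.WriteHeader(http.StatusOK)        // suppressed: no "superfluous WriteHeader" log

	fmt.Println(rec.Code, trw.headerWritten) // 403 true
}
```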
@@ -18,8 +18,12 @@
 package cmd

 import (
+	"io"
 	"net/http"
+	"net/http/httptest"
 	"testing"
+
+	"github.com/klauspost/compress/gzhttp"
 )

 // Tests object location.
@@ -122,3 +126,89 @@ func TestGetURLScheme(t *testing.T) {
 		t.Errorf("Expected %s, got %s", httpsScheme, gotScheme)
 	}
 }
+
+func TestTrackingResponseWriter(t *testing.T) {
+	rw := httptest.NewRecorder()
+	trw := &trackingResponseWriter{ResponseWriter: rw}
+	trw.WriteHeader(123)
+	if !trw.headerWritten {
+		t.Fatal("headerWritten was not set by WriteHeader call")
+	}
+
+	_, err := trw.Write([]byte("hello"))
+	if err != nil {
+		t.Fatalf("Write unexpectedly failed: %v", err)
+	}
+
+	// Check that WriteHeader and Write were called on the underlying response writer
+	resp := rw.Result()
+	if resp.StatusCode != 123 {
+		t.Fatalf("unexpected status: %v", resp.StatusCode)
+	}
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("reading response body failed: %v", err)
+	}
+	if string(body) != "hello" {
+		t.Fatalf("response body incorrect: %v", string(body))
+	}
+
+	// Check that Unwrap works
+	if trw.Unwrap() != rw {
+		t.Fatalf("Unwrap returned wrong result: %v", trw.Unwrap())
+	}
+}
+
+func TestHeadersAlreadyWritten(t *testing.T) {
+	rw := httptest.NewRecorder()
+	trw := &trackingResponseWriter{ResponseWriter: rw}
+
+	if headersAlreadyWritten(trw) {
+		t.Fatal("headers have not been written yet")
+	}
+
+	trw.WriteHeader(123)
+	if !headersAlreadyWritten(trw) {
+		t.Fatal("headers were written")
+	}
+}
+
+func TestHeadersAlreadyWrittenWrapped(t *testing.T) {
+	rw := httptest.NewRecorder()
+	trw := &trackingResponseWriter{ResponseWriter: rw}
+	wrap1 := &gzhttp.NoGzipResponseWriter{ResponseWriter: trw}
+	wrap2 := &gzhttp.NoGzipResponseWriter{ResponseWriter: wrap1}
+
+	if headersAlreadyWritten(wrap2) {
+		t.Fatal("headers have not been written yet")
+	}
+
+	wrap2.WriteHeader(123)
+	if !headersAlreadyWritten(wrap2) {
+		t.Fatal("headers were written")
+	}
+}
+
+func TestWriteResponseHeadersNotWritten(t *testing.T) {
+	rw := httptest.NewRecorder()
+	trw := &trackingResponseWriter{ResponseWriter: rw}
+
+	writeResponse(trw, 299, []byte("hello"), "application/foo")
+
+	resp := rw.Result()
+	if resp.StatusCode != 299 {
+		t.Fatal("response wasn't written")
+	}
+}
+
+func TestWriteResponseHeadersWritten(t *testing.T) {
+	rw := httptest.NewRecorder()
+	rw.Code = -1
+	trw := &trackingResponseWriter{ResponseWriter: rw, headerWritten: true}
+
+	writeResponse(trw, 200, []byte("hello"), "application/foo")
+
+	if rw.Code != -1 {
+		t.Fatalf("response was written when it shouldn't have been (Code=%v)", rw.Code)
+	}
+}
@@ -218,6 +218,8 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
 	handlerName := getHandlerName(f, "objectAPIHandlers")

 	var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
+		w = &trackingResponseWriter{ResponseWriter: w}
+
 		// Wrap the actual handler with the appropriate tracing middleware.
 		var tracedHandler http.HandlerFunc
 		if handlerFlags.has(traceHdrsS3HFlag) {
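Because the tracker is installed at the outermost point of the handler chain, any layer added later (tracing, gzip, and so on) wraps *it*, and the guard finds it again by unwrapping. A hypothetical, self-contained sketch of that lookup, where `wrapper` stands in for any middleware that exposes `Unwrap`:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type trackingResponseWriter struct {
	http.ResponseWriter
	headerWritten bool
}

func (w *trackingResponseWriter) WriteHeader(code int) {
	if !w.headerWritten {
		w.headerWritten = true
		w.ResponseWriter.WriteHeader(code)
	}
}

func (w *trackingResponseWriter) Unwrap() http.ResponseWriter { return w.ResponseWriter }

// wrapper stands in for a later middleware layer that wraps the tracker but
// exposes it again via Unwrap.
type wrapper struct {
	http.ResponseWriter
}

func (w *wrapper) Unwrap() http.ResponseWriter { return w.ResponseWriter }

// headersAlreadyWritten mirrors the unwrap loop from the diff above.
func headersAlreadyWritten(w http.ResponseWriter) bool {
	for {
		switch v := w.(type) {
		case *trackingResponseWriter:
			return v.headerWritten
		case interface{ Unwrap() http.ResponseWriter }:
			w = v.Unwrap()
		default:
			return false
		}
	}
}

func main() {
	rec := httptest.NewRecorder()
	trw := &trackingResponseWriter{ResponseWriter: rec}
	outer := &wrapper{ResponseWriter: trw} // the handler only sees the outermost writer

	fmt.Println(headersAlreadyWritten(outer)) // false
	outer.WriteHeader(http.StatusNotFound)    // goes through the tracker underneath
	fmt.Println(headersAlreadyWritten(outer)) // true
}
```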
@@ -0,0 +1,225 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"testing"

	"github.com/dustin/go-humanize"
	xhttp "github.com/minio/minio/internal/http"
)

// TestNewMultipartUploadConditionalWithReadQuorumFailure tests that conditional
// multipart uploads (with if-match/if-none-match) behave correctly when read quorum
// cannot be reached.
//
// Related to: https://github.com/minio/minio/issues/21603
//
// Should return an error when read quorum cannot
// be reached, as we cannot reliably determine if the precondition is met.
func TestNewMultipartUploadConditionalWithReadQuorumFailure(t *testing.T) {
	ctx := context.Background()

	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	bucket := "test-bucket"
	object := "test-object"

	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Put an initial object so it exists
	_, err = obj.PutObject(ctx, bucket, object,
		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
			int64(len("initial-value")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Get object info to capture the ETag
	objInfo, err := obj.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}
	existingETag := objInfo.ETag

	// Simulate read quorum failure by taking enough disks offline
	// With 16 disks (EC 8+8), read quorum is 9. Taking 8 disks offline leaves only 8,
	// which is below read quorum.
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:8] {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	t.Run("if-none-match with read quorum failure", func(t *testing.T) {
		// Test Case 1: if-none-match (create only if doesn't exist)
		// With if-none-match: *, this should only succeed if object doesn't exist.
		// Since read quorum fails, we can't determine if object exists.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfNoneMatch: "*",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if object exists (ETag is not empty)
				return oi.ETag != ""
			},
		}

		_, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-none-match is used with quorum failure, got: %v", err)
		}
	})

	t.Run("if-match with wrong ETag and read quorum failure", func(t *testing.T) {
		// Test Case 2: if-match with WRONG ETag
		// This should fail even without quorum issues, but with quorum failure
		// we can't verify the ETag at all.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: "wrong-etag-12345",
			},
			HasIfMatch: true,
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETags don't match
				return oi.ETag != "wrong-etag-12345"
			},
		}

		_, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
		if !isErrReadQuorum(err) {
			t.Logf("Got error (as expected): %v", err)
			t.Logf("But expected read quorum error, not object-not-found error")
		}
	})

	t.Run("if-match with correct ETag and read quorum failure", func(t *testing.T) {
		// Test Case 3: if-match with CORRECT ETag but read quorum failure
		// Even with the correct ETag, we shouldn't proceed if we can't verify it.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: existingETag,
			},
			HasIfMatch: true,
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETags don't match
				return oi.ETag != existingETag
			},
		}

		_, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-match is used with quorum failure, got: %v", err)
		}
	})
}

// TestCompleteMultipartUploadConditionalWithReadQuorumFailure tests that conditional
// complete multipart upload operations behave correctly when read quorum cannot be reached.
func TestCompleteMultipartUploadConditionalWithReadQuorumFailure(t *testing.T) {
	ctx := context.Background()

	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	bucket := "test-bucket"
	object := "test-object"

	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Put an initial object
	_, err = obj.PutObject(ctx, bucket, object,
		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
			int64(len("initial-value")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Start a multipart upload WITHOUT conditional checks (this should work)
	res, err := obj.NewMultipartUpload(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Upload a part
	partData := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
	md5Hex := getMD5Hash(partData)
	_, err = obj.PutObjectPart(ctx, bucket, object, res.UploadID, 1,
		mustGetPutObjReader(t, bytes.NewReader(partData), int64(len(partData)), md5Hex, ""),
		ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Now simulate read quorum failure
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:8] {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	t.Run("complete multipart with if-none-match and read quorum failure", func(t *testing.T) {
		// Try to complete the multipart upload with if-none-match
		// This should fail because we can't verify the condition due to read quorum failure
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfNoneMatch: "*",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				return oi.ETag != ""
			},
		}

		parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
		_, err := obj.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, parts, opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error, got: %v", err)
		}
	})
}
@@ -390,7 +390,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 	if err == nil && opts.CheckPrecondFn(obj) {
 		return nil, PreConditionFailed{}
 	}
-	if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
+	if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
 		return nil, err
 	}
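The removed `!isErrReadQuorum(err)` clause had treated a quorum failure like "object not found", letting a conditional write proceed without its precondition ever being evaluated. The same one-line change recurs in the hunks below for CompleteMultipartUpload and putObject. A small illustrative sketch of the corrected decision, using stand-in error values rather than the real minio types:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the real minio sentinel errors; illustrative only.
var (
	errObjectNotFound = errors.New("object not found")
	errReadQuorum     = errors.New("read quorum could not be reached")
)

// checkPrecond sketches the corrected flow: "not found" is an answer a
// precondition can be evaluated against, but a quorum failure means the
// object's state is unknown, so the error must surface to the caller.
func checkPrecond(err error, preconditionFails func() bool) error {
	if err == nil && preconditionFails() {
		return errors.New("precondition failed")
	}
	// Before this commit, errReadQuorum was also filtered out here, which let
	// conditional writes proceed without the precondition ever being checked.
	if err != nil && !errors.Is(err, errObjectNotFound) {
		return err
	}
	return nil // object absent or precondition held: proceed with the write
}

func main() {
	never := func() bool { return false }
	fmt.Println(checkPrecond(errObjectNotFound, never)) // <nil>: treated as "does not exist"
	fmt.Println(checkPrecond(errReadQuorum, never))     // quorum failure propagates
}
```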
@@ -1114,7 +1114,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	if err == nil && opts.CheckPrecondFn(obj) {
 		return ObjectInfo{}, PreConditionFailed{}
 	}
-	if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
+	if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
 		return ObjectInfo{}, err
 	}
@@ -0,0 +1,150 @@
// Copyright (c) 2015-2025 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bytes"
	"context"
	"testing"

	xhttp "github.com/minio/minio/internal/http"
)

// TestPutObjectConditionalWithReadQuorumFailure tests that conditional
// PutObject operations (with if-match/if-none-match) behave correctly when read quorum
// cannot be reached.
//
// Related to: https://github.com/minio/minio/issues/21603
//
// Should return an error when read quorum cannot
// be reached, as we cannot reliably determine if the precondition is met.
func TestPutObjectConditionalWithReadQuorumFailure(t *testing.T) {
	ctx := context.Background()

	obj, fsDirs, err := prepareErasure16(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer obj.Shutdown(context.Background())
	defer removeRoots(fsDirs)

	z := obj.(*erasureServerPools)
	xl := z.serverPools[0].sets[0]

	bucket := "test-bucket"
	object := "test-object"

	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Put an initial object so it exists
	_, err = obj.PutObject(ctx, bucket, object,
		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
			int64(len("initial-value")), "", ""), ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Get object info to capture the ETag
	objInfo, err := obj.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
	if err != nil {
		t.Fatal(err)
	}
	existingETag := objInfo.ETag

	// Simulate read quorum failure by taking enough disks offline
	// With 16 disks (EC 8+8), read quorum is 9. Taking 8 disks offline leaves only 8,
	// which is below read quorum.
	erasureDisks := xl.getDisks()
	z.serverPools[0].erasureDisksMu.Lock()
	xl.getDisks = func() []StorageAPI {
		for i := range erasureDisks[:8] {
			erasureDisks[i] = nil
		}
		return erasureDisks
	}
	z.serverPools[0].erasureDisksMu.Unlock()

	t.Run("if-none-match with read quorum failure", func(t *testing.T) {
		// Test Case 1: if-none-match (create only if doesn't exist)
		// With if-none-match: *, this should only succeed if object doesn't exist.
		// Since read quorum fails, we can't determine if object exists.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfNoneMatch: "*",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if object exists (ETag is not empty)
				return oi.ETag != ""
			},
		}

		_, err := obj.PutObject(ctx, bucket, object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("new-value")),
				int64(len("new-value")), "", ""), opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-none-match is used with quorum failure, got: %v", err)
		}
	})

	t.Run("if-match with read quorum failure", func(t *testing.T) {
		// Test Case 2: if-match (update only if ETag matches)
		// With if-match: <etag>, this should only succeed if object exists with matching ETag.
		// Since read quorum fails, we can't determine if object exists or ETag matches.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: existingETag,
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETag doesn't match
				return oi.ETag != existingETag
			},
		}

		_, err := obj.PutObject(ctx, bucket, object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("updated-value")),
				int64(len("updated-value")), "", ""), opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-match is used with quorum failure, got: %v", err)
		}
	})

	t.Run("if-match wrong etag with read quorum failure", func(t *testing.T) {
		// Test Case 3: if-match with wrong ETag
		// Even if the ETag doesn't match, we should still get read quorum error
		// because we can't read the object to check the condition.
		opts := ObjectOptions{
			UserDefined: map[string]string{
				xhttp.IfMatch: "wrong-etag",
			},
			CheckPrecondFn: func(oi ObjectInfo) bool {
				// Precondition fails if ETag doesn't match
				return oi.ETag != "wrong-etag"
			},
		}

		_, err := obj.PutObject(ctx, bucket, object,
			mustGetPutObjReader(t, bytes.NewReader([]byte("should-fail")),
				int64(len("should-fail")), "", ""), opts)
		if !isErrReadQuorum(err) {
			t.Errorf("Expected read quorum error when if-match is used with quorum failure (even with wrong ETag), got: %v", err)
		}
	})
}
@@ -1274,7 +1274,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	if err == nil && opts.CheckPrecondFn(obj) {
 		return objInfo, PreConditionFailed{}
 	}
-	if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) {
+	if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
 		return objInfo, err
 	}
@@ -386,7 +386,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 	ch <- prometheus.MustNewConstMetric(
 		prometheus.NewDesc(
 			prometheus.BuildFQName(minioNamespace, "capacity_raw", "total"),
-			"Total capacity online in the cluster",
+			"Total capacity online in current MinIO server instance",
 			nil, nil),
 		prometheus.GaugeValue,
 		float64(GetTotalCapacity(server.Disks)),
@@ -396,7 +396,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 	ch <- prometheus.MustNewConstMetric(
 		prometheus.NewDesc(
 			prometheus.BuildFQName(minioNamespace, "capacity_raw_free", "total"),
-			"Total free capacity online in the cluster",
+			"Total free capacity online in current MinIO server instance",
 			nil, nil),
 		prometheus.GaugeValue,
 		float64(GetTotalCapacityFree(server.Disks)),
@@ -408,7 +408,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 	ch <- prometheus.MustNewConstMetric(
 		prometheus.NewDesc(
 			prometheus.BuildFQName(minioNamespace, "capacity_usable", "total"),
-			"Total usable capacity online in the cluster",
+			"Total usable capacity online in current MinIO server instance",
 			nil, nil),
 		prometheus.GaugeValue,
 		float64(GetTotalUsableCapacity(server.Disks, sinfo)),
@@ -418,7 +418,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 	ch <- prometheus.MustNewConstMetric(
 		prometheus.NewDesc(
 			prometheus.BuildFQName(minioNamespace, "capacity_usable_free", "total"),
-			"Total free usable capacity online in the cluster",
+			"Total free usable capacity online in current MinIO server instance",
 			nil, nil),
 		prometheus.GaugeValue,
 		float64(GetTotalUsableCapacityFree(server.Disks, sinfo)),
@@ -51,6 +51,7 @@ var startsWithConds = map[string]bool{
 	"$x-amz-algorithm":  false,
 	"$x-amz-credential": false,
 	"$x-amz-date":       false,
+	"$tagging":          false,
 }

 // Add policy conditionals.
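Reading the map by its name, a `false` entry registers the condition key while disallowing the `starts-with` operator, so `$tagging` may only be matched exactly. A hypothetical browser-upload POST policy exercising the new key might look like the following (all values are illustrative):

```go
package main

import "fmt"

func main() {
	// Hypothetical POST policy document. With "$tagging": false in
	// startsWithConds, an exact-match condition on $tagging is accepted,
	// while a ["starts-with", "$tagging", ...] condition would be rejected.
	policy := `{
  "expiration": "2025-12-31T00:00:00.000Z",
  "conditions": [
    {"bucket": "testbucket"},
    ["starts-with", "$key", "user/"],
    ["eq", "$tagging", "<Tagging><TagSet><Tag><Key>env</Key><Value>dev</Value></Tag></TagSet></Tagging>"]
  ]
}`
	fmt.Println(policy)
}
```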
@@ -1,37 +1,69 @@
 #!/bin/bash

-sudo sysctl net.ipv6.conf.all.disable_ipv6=0
-
 set -ex

-remote=$(git remote get-url upstream)
-if test "$remote" != "git@github.com:minio/minio.git"; then
-	echo "Script requires that the 'upstream' remote is set to git@github.com:minio/minio.git"
-	exit 1
-fi
-
-git remote update upstream && git checkout master && git rebase upstream/master
-
-release=$(git describe --abbrev=0 --tags)
-
-docker buildx build --push --no-cache \
-	--build-arg RELEASE="${release}" \
-	-t "minio/minio:latest" \
-	-t "minio/minio:latest-cicd" \
-	-t "quay.io/minio/minio:latest" \
-	-t "quay.io/minio/minio:latest-cicd" \
-	-t "minio/minio:${release}" \
-	-t "quay.io/minio/minio:${release}" \
-	--platform=linux/arm64,linux/amd64,linux/ppc64le \
-	-f Dockerfile.release .
-
-docker buildx prune -f
-
-docker buildx build --push --no-cache \
-	--build-arg RELEASE="${release}" \
-	-t "minio/minio:${release}-cpuv1" \
-	-t "quay.io/minio/minio:${release}-cpuv1" \
-	--platform=linux/arm64,linux/amd64,linux/ppc64le \
-	-f Dockerfile.release.old_cpu .
-
-docker buildx prune -f
-
-sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+function _init() {
+	## All binaries are static make sure to disable CGO.
+	export CGO_ENABLED=0
+	export CRED_DIR="/media/${USER}/minio"
+
+	## List of architectures and OS to test cross compilation.
+	SUPPORTED_OSARCH="linux/ppc64le linux/amd64 linux/arm64"
+
+	remote=$(git remote get-url upstream)
+	if test "$remote" != "git@github.com:minio/minio.git"; then
+		echo "Script requires that the 'upstream' remote is set to git@github.com:minio/minio.git"
+		exit 1
+	fi
+
+	git remote update upstream && git checkout master && git rebase upstream/master
+
+	release=$(git describe --abbrev=0 --tags)
+	export release
+}
+
+function _build() {
+	local osarch=$1
+	IFS=/ read -r -a arr <<<"$osarch"
+	os="${arr[0]}"
+	arch="${arr[1]}"
+	package=$(go list -f '{{.ImportPath}}')
+	printf -- "--> %15s:%s\n" "${osarch}" "${package}"
+
+	# go build -trimpath to build the binary.
+	export GOOS=$os
+	export GOARCH=$arch
+	export MINIO_RELEASE=RELEASE
+	LDFLAGS=$(go run buildscripts/gen-ldflags.go)
+	go build -tags kqueue -trimpath --ldflags "${LDFLAGS}" -o ./minio-${arch}.${release}
+	minisign -qQSm ./minio-${arch}.${release} -s "$CRED_DIR/minisign.key" <"$CRED_DIR/minisign-passphrase"
+
+	sha256sum_str=$(sha256sum <./minio-${arch}.${release})
+	rc=$?
+	if [ "$rc" -ne 0 ]; then
+		abort "unable to generate sha256sum for ${1}"
+	fi
+	echo "${sha256sum_str// -/minio.${release}}" >./minio-${arch}.${release}.sha256sum
+}
+
+function main() {
+	echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
+	for each_osarch in ${SUPPORTED_OSARCH}; do
+		_build "${each_osarch}"
+	done
+
+	sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+
+	docker buildx build --push --no-cache \
+		--build-arg RELEASE="${release}" \
+		-t "registry.min.dev/community/minio:latest" \
+		-t "registry.min.dev/community/minio:${release}" \
+		--platform=linux/arm64,linux/amd64,linux/ppc64le \
+		-f Dockerfile .

+	docker buildx prune -f
+
+	sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+}
+
+_init && main "$@"
@@ -1,417 +0,0 @@
# Metrics Version 3

In metrics version 3, all metrics are available under the following base endpoint:

```
/minio/metrics/v3
```

To query metrics of a specific type, append the appropriate path to the base endpoint.
Querying the base endpoint returns "404 Not Found."

Metrics are organized into groups at paths **relative** to the top-level endpoint above.

Metrics are also available using the [MinIO Admin Client](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) and the `mc admin prometheus metrics` command. For more information, see [Metrics and Alerts](https://docs.min.io/community/minio-object-store/operations/monitoring/metrics-and-alerts.html) in the MinIO Documentation.

## Metrics Request Handling

Each endpoint can be queried as needed via a scrape configuration in Prometheus or a compatible metrics collection tool. You should schedule scrape operations so a prior scrape completes before the next one begins.

For ease of configuration, each (non-empty) parent of the path serves all the metric endpoints at its child paths. For example, to query all system metrics, scrape `/minio/metrics/v3/system/`.

Each metric endpoint may support multiple child endpoints. For example, the `/v3/system/` metric group has multiple children at `/v3/system/<child>`. Querying the parent endpoint returns metrics for all child groups, while querying a child group returns only metrics for that child.

### Per-bucket Metrics

Metrics with a `/bucket` component in the path return results for each specified bucket in the deployment. These endpoints **require** a list of buckets in the `buckets` query parameter. The endpoint then returns only metrics for the given buckets, with the bucket name in a `bucket` label.

For example, to query API metrics for buckets `test1` and `test2`, make a scrape request to `/minio/metrics/v3/api/bucket?buckets=test1,test2`.
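A collector could issue that request as in the following hypothetical sketch; the host and port are placeholders, and deployments that require authentication would also need to attach a bearer token:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical endpoint; point this at a real MinIO server to try it.
	base := "http://localhost:9000/minio/metrics/v3/api/bucket"
	q := url.Values{"buckets": {"test1,test2"}}

	resp, err := http.Get(base + "?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Each returned series carries the bucket name in a `bucket` label.
	fmt.Println(string(body))
}
```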
### List Available Metrics

Instead of a metrics scrape, you can list the metrics that would be returned by a path by adding a `list` query parameter. The MinIO server then lists all available metrics that could be returned. Note that during an actual metrics scrape, only metrics with available _values_ are returned; metrics with null values are omitted from the scrape results.

To set the output format, set the request `Content-Type` to the desired format. Accepted values are `application/json` for JSON output or `text/plain` for a Markdown-formatted table. The default is Markdown.

For example, the following returns a list of all available bucket metrics:

```
/minio/metrics/v3/api/bucket?list
```
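A sketch of requesting the JSON form of that listing; the endpoint is again a placeholder and authentication is omitted:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// ?list asks the server to describe the metrics a path can return
	// instead of scraping their current values.
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:9000/minio/metrics/v3/api/bucket?list", nil)
	if err != nil {
		panic(err)
	}
	// Per the doc above, the request Content-Type selects the output format;
	// omit it (or use text/plain) for the default Markdown table.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```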
## Metric Categories

At a high level, metrics are grouped into categories as described in the following sections. The path in each of the tables is relative to the top-level endpoint. Note that the standard GoCollector metrics are not shown.

### Request metrics

Metrics about requests served by the current node.

| Path | Description |
|---|---|
| `/api/requests` | Metrics over all requests. |
| `/bucket/api` | Metrics over all requests for a given bucket. |

#### `/api/requests`

| Name | Description | Labels |
|---|---|---|
| `minio_api_requests_rejected_auth_total` | Total number of requests rejected for auth failure. <br><br>Type: counter | `type`, `pool_index`, `server` |
| `minio_api_requests_rejected_header_total` | Total number of requests rejected for invalid header. <br><br>Type: counter | `type`, `pool_index`, `server` |
| `minio_api_requests_rejected_timestamp_total` | Total number of requests rejected for invalid timestamp. <br><br>Type: counter | `type`, `pool_index`, `server` |
| `minio_api_requests_rejected_invalid_total` | Total number of invalid requests. <br><br>Type: counter | `type`, `pool_index`, `server` |
| `minio_api_requests_waiting_total` | Total number of requests in the waiting queue. <br><br>Type: gauge | `type`, `pool_index`, `server` |
| `minio_api_requests_incoming_total` | Total number of incoming requests. <br><br>Type: gauge | `type`, `pool_index`, `server` |
| `minio_api_requests_inflight_total` | Total number of requests currently in flight. <br><br>Type: gauge | `name`, `type`, `pool_index`, `server` |
| `minio_api_requests_total` | Total number of requests. <br><br>Type: counter | `name`, `type`, `pool_index`, `server` |
| `minio_api_requests_errors_total` | Total number of requests with 4xx or 5xx errors. <br><br>Type: counter | `name`, `type`, `pool_index`, `server` |
| `minio_api_requests_5xx_errors_total` | Total number of requests with 5xx errors. <br><br>Type: counter | `name`, `type`, `pool_index`, `server` |
| `minio_api_requests_4xx_errors_total` | Total number of requests with 4xx errors. <br><br>Type: counter | `name`, `type`, `pool_index`, `server` |
| `minio_api_requests_canceled_total` | Total number of requests canceled by the client. <br><br>Type: counter | `name`, `type`, `pool_index`, `server` |
| `minio_api_requests_ttfb_seconds_distribution` | Distribution of time to first byte across API calls. <br><br>Type: counter | `name`, `type`, `le`, `pool_index`, `server` |
| `minio_api_requests_traffic_sent_bytes` | Total number of bytes sent. <br><br>Type: counter | `type`, `pool_index`, `server` |
| `minio_api_requests_traffic_received_bytes` | Total number of bytes received. <br><br>Type: counter | `type`, `pool_index`, `server` |
#### `/bucket/api`

| Name | Description | Labels |
|---|---|---|
| `minio_bucket_api_traffic_received_bytes` | Total number of bytes received for a bucket. <br><br>Type: counter | `bucket`, `type`, `server`, `pool_index` |
| `minio_bucket_api_traffic_sent_bytes` | Total number of bytes sent for a bucket. <br><br>Type: counter | `bucket`, `type`, `server`, `pool_index` |
| `minio_bucket_api_inflight_total` | Total number of requests currently in flight for a bucket. <br><br>Type: gauge | `bucket`, `name`, `type`, `server`, `pool_index` |
| `minio_bucket_api_total` | Total number of requests for a bucket. <br><br>Type: counter | `bucket`, `name`, `type`, `server`, `pool_index` |
| `minio_bucket_api_canceled_total` | Total number of requests canceled by the client for a bucket. <br><br>Type: counter | `bucket`, `name`, `type`, `server`, `pool_index` |
| `minio_bucket_api_4xx_errors_total` | Total number of requests with 4xx errors for a bucket. <br><br>Type: counter | `bucket`, `name`, `type`, `server`, `pool_index` |
| `minio_bucket_api_5xx_errors_total` | Total number of requests with 5xx errors for a bucket. <br><br>Type: counter | `bucket`, `name`, `type`, `server`, `pool_index` |
| `minio_bucket_api_ttfb_seconds_distribution` | Distribution of time to first byte across API calls for a bucket. <br><br>Type: counter | `bucket`, `name`, `le`, `type`, `server`, `pool_index` |
### Audit metrics

Metrics about the MinIO audit functionality.

| Path | Description |
|---|---|
| `/audit` | Metrics related to audit functionality. |

#### `/audit`

| Name | Description | Labels |
|---|---|---|
| `minio_audit_failed_messages` | Total number of messages that failed to send since start. <br><br>Type: counter | `target_id`, `server` |
| `minio_audit_target_queue_length` | Number of unsent messages in queue for target. <br><br>Type: gauge | `target_id`, `server` |
| `minio_audit_total_messages` | Total number of messages sent since start. <br><br>Type: counter | `target_id`, `server` |

### Cluster metrics

Metrics about an entire MinIO cluster.

| Path | Description |
|---|---|
| `/cluster/config` | Cluster configuration metrics. |
| `/cluster/erasure-set` | Erasure set metrics. |
| `/cluster/health` | Cluster health metrics. |
| `/cluster/iam` | Cluster IAM metrics. |
| `/cluster/usage/buckets` | Object statistics by bucket. |
| `/cluster/usage/objects` | Object statistics. |

#### `/cluster/config`

| Name | Description | Labels |
|---|---|---|
| `minio_cluster_config_rrs_parity` | Reduced redundancy storage class parity. <br><br>Type: gauge | |
| `minio_cluster_config_standard_parity` | Standard storage class parity. <br><br>Type: gauge | |

#### `/cluster/erasure-set`

| Name | Description | Labels |
|---|---|---|
| `minio_cluster_erasure_set_overall_write_quorum` | Overall write quorum across pools and sets. <br><br>Type: gauge | |
| `minio_cluster_erasure_set_overall_health` | Overall health across pools and sets (1=healthy, 0=unhealthy). <br><br>Type: gauge | |
| `minio_cluster_erasure_set_read_quorum` | Read quorum for the erasure set in a pool. <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_write_quorum` | Write quorum for the erasure set in a pool. <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_online_drives_count` | Count of online drives in the erasure set in a pool. <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_healing_drives_count` | Count of healing drives in the erasure set in a pool. <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_health` | Health of the erasure set in a pool (1=healthy, 0=unhealthy). <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_read_tolerance` | Number of drive failures that can be tolerated without disrupting read operations. <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_write_tolerance` | Number of drive failures that can be tolerated without disrupting write operations. <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_read_health` | Health of the erasure set in a pool for read operations (1=healthy, 0=unhealthy). <br><br>Type: gauge | `pool_id`, `set_id` |
| `minio_cluster_erasure_set_write_health` | Health of the erasure set in a pool for write operations (1=healthy, 0=unhealthy). <br><br>Type: gauge | `pool_id`, `set_id` |

#### `/cluster/health`

| Name | Description | Labels |
|---|---|---|
| `minio_cluster_health_drives_offline_count` | Count of offline drives in the cluster. <br><br>Type: gauge | |
| `minio_cluster_health_drives_online_count` | Count of online drives in the cluster. <br><br>Type: gauge | |
| `minio_cluster_health_drives_count` | Count of all drives in the cluster. <br><br>Type: gauge | |
| `minio_cluster_health_nodes_offline_count` | Count of offline nodes in the cluster. <br><br>Type: gauge | |
| `minio_cluster_health_nodes_online_count` | Count of online nodes in the cluster. <br><br>Type: gauge | |
| `minio_cluster_health_capacity_raw_total_bytes` | Total cluster raw storage capacity in bytes. <br><br>Type: gauge | |
| `minio_cluster_health_capacity_raw_free_bytes` | Total cluster raw storage free in bytes. <br><br>Type: gauge | |
| `minio_cluster_health_capacity_usable_total_bytes` | Total cluster usable storage capacity in bytes. <br><br>Type: gauge | |
| `minio_cluster_health_capacity_usable_free_bytes` | Total cluster usable storage free in bytes. <br><br>Type: gauge | |

#### `/cluster/iam`

| Name | Description | Labels |
|---|---|---|
| `minio_cluster_iam_last_sync_duration_millis` | Last successful IAM data sync duration in milliseconds. <br><br>Type: counter | |
| `minio_cluster_iam_plugin_authn_service_failed_requests_minute` | When plugin authentication is configured, returns failed requests count in the last full minute. <br><br>Type: counter | |
| `minio_cluster_iam_plugin_authn_service_last_fail_seconds` | When plugin authentication is configured, returns time (in seconds) since the last failed request to the service. <br><br>Type: counter | |
| `minio_cluster_iam_plugin_authn_service_last_succ_seconds` | When plugin authentication is configured, returns time (in seconds) since the last successful request to the service. <br><br>Type: counter | |
| `minio_cluster_iam_plugin_authn_service_succ_avg_rtt_ms_minute` | When plugin authentication is configured, returns average round-trip time of successful requests in the last full minute. <br><br>Type: counter | |
| `minio_cluster_iam_plugin_authn_service_succ_max_rtt_ms_minute` | When plugin authentication is configured, returns maximum round-trip time of successful requests in the last full minute. <br><br>Type: counter | |
| `minio_cluster_iam_plugin_authn_service_total_requests_minute` | When plugin authentication is configured, returns total requests count in the last full minute. <br><br>Type: counter | |
| `minio_cluster_iam_since_last_sync_millis` | Time (in milliseconds) since last successful IAM data sync. <br><br>Type: counter | |
| `minio_cluster_iam_sync_failures` | Number of failed IAM data syncs since server start. <br><br>Type: counter | |
| `minio_cluster_iam_sync_successes` | Number of successful IAM data syncs since server start. <br><br>Type: counter | |

#### `/cluster/usage/buckets`

| Name | Description | Labels |
|---|---|---|
| `minio_cluster_usage_buckets_since_last_update_seconds` | Time since last update of usage metrics in seconds. <br><br>Type: gauge | |
| `minio_cluster_usage_buckets_total_bytes` | Total bucket size in bytes. <br><br>Type: gauge | `bucket` |
| `minio_cluster_usage_buckets_objects_count` | Total object count in bucket. <br><br>Type: gauge | `bucket` |
| `minio_cluster_usage_buckets_versions_count` | Total object versions count in bucket, including delete markers. <br><br>Type: gauge | `bucket` |
| `minio_cluster_usage_buckets_delete_markers_count` | Total delete markers count in bucket. <br><br>Type: gauge | `bucket` |
| `minio_cluster_usage_buckets_quota_total_bytes` | Total bucket quota in bytes. <br><br>Type: gauge | `bucket` |
| `minio_cluster_usage_buckets_object_size_distribution` | Bucket object size distribution. <br><br>Type: gauge | `range`, `bucket` |
| `minio_cluster_usage_buckets_object_version_count_distribution` | Bucket object version count distribution. <br><br>Type: gauge | `range`, `bucket` |

#### `/cluster/usage/objects`

| Name | Description | Labels |
|---|---|---|
| `minio_cluster_usage_objects_since_last_update_seconds` | Time since last update of usage metrics in seconds. <br><br>Type: gauge | |
| `minio_cluster_usage_objects_total_bytes` | Total cluster usage in bytes. <br><br>Type: gauge | |
| `minio_cluster_usage_objects_count` | Total cluster objects count. <br><br>Type: gauge | |
| `minio_cluster_usage_objects_versions_count` | Total cluster object versions count, including delete markers. <br><br>Type: gauge | |
| `minio_cluster_usage_objects_delete_markers_count` | Total cluster delete markers count. <br><br>Type: gauge | |
| `minio_cluster_usage_objects_buckets_count` | Total cluster buckets count. <br><br>Type: gauge | |
| `minio_cluster_usage_objects_size_distribution` | Cluster object size distribution. <br><br>Type: gauge | `range` |
| `minio_cluster_usage_objects_version_count_distribution` | Cluster object version count distribution. <br><br>Type: gauge | `range` |
### Debug metrics
|
||||
|
||||
Standard Go runtime metrics from the [Prometheus Go Client base collector](https://github.com/prometheus/client_golang).
|
||||
|
||||
| Path | Description |
|
||||
|-------------|---------------------|
|
||||
| `/debug/go` | Go runtime metrics. |
|
||||
|
||||
### ILM metrics
|
||||
|
||||
Metrics about the MinIO ILM functionality.
|
||||
|
||||
| Path | Description |
|
||||
|--------|---------------------------------------|
|
||||
| `/ilm` | Metrics related to ILM functionality. |
|
||||
|
||||
#### `/ilm`
|
||||
|
||||
| Name | Description | Labels |
|
||||
|-------------------------------------------------------|---------------------------------------------------------------------------------------------------|----------|
|
||||
| `minio_cluster_ilm_expiry_pending_tasks` | Number of pending ILM expiry tasks in the queue. <br><br>Type: gauge | `server` |
|
||||
| `minio_cluster_ilm_transition_active_tasks` | Number of active ILM transition tasks. <br><br>Type: gauge | `server` |
|
||||
| `minio_cluster_ilm_transition_pending_tasks` | Number of pending ILM transition tasks in the queue. <br><br>Type: gauge | `server` |
|
||||
| `minio_cluster_ilm_transition_missed_immediate_tasks` | Number of missed immediate ILM transition tasks. <br><br>Type: counter | `server` |
|
||||
| `minio_cluster_ilm_versions_scanned` | Total number of object versions checked for ILM actions since server start. <br><br>Type: counter | `server` |
|
||||
|
||||
### Logger webhook metrics
|
||||
|
||||
Metrics about MinIO logger webhooks.
|
||||
|
||||
| Path | Description |
|
||||
|-------------------|-------------------------------------|
|
||||
| `/logger/webhook` | Metrics related to logger webhooks. |
|
||||
|
||||
#### `/logger/webhook`
|
||||
|
||||
| Name | Description | Labels |
|
||||
|-----------------------------------------|---------------------------------------------------------------------|------------------------------|
|
||||
| `minio_logger_webhook_failed_messages` | Number of messages that failed to send. <br><br>Type: counter | `server`, `name`, `endpoint` |
|
||||
| `minio_logger_webhook_queue_length` | Webhook queue length. <br><br>Type: gauge | `server`, `name`, `endpoint` |
|
||||
| `minio_logger_webhook_total_message` | Total number of messages sent to this target. <br><br>Type: counter | `server`, `name`, `endpoint` |
|
||||
|
||||
### Notification metrics
|
||||
|
||||
Metrics about the MinIO notification functionality.
|
||||
|
||||
| Path | Description |
|
||||
|-----------------|------------------------------------------------|
|
||||
| `/notification` | Metrics related to notification functionality. |
|
||||
|
||||
#### `/notification`
|
||||
|
||||
| Name | Description | Labels |
|
||||
|-----------------------------------------------|-------------------------------------------------------------------------------------------------------|----------|
|
||||
| `minio_notification_current_send_in_progress` | Number of concurrent async Send calls active to all targets. <br><br>Type: counter | `server` |
|
||||
| `minio_notification_events_errors_total` | Total number of events that failed to send to the targets. <br><br>Type: counter | `server` |
|
||||
| `minio_notification_events_sent_total` | Total number of events sent to the targets. <br><br>Type: counter | `server` |
|
||||
| `minio_notification_events_skipped_total` | Number of events not sent to the targets due to the in-memory queue being full. <br><br>Type: counter | `server` |
|
||||
|
||||
### Replication metrics
|
||||
|
||||
Metrics about MinIO site and bucket replication.
|
||||
|
||||
| Path | Description |
|
||||
|-----------------------|----------------------------------------|
|
||||
| `/bucket/replication` | Metrics related to bucket replication. |
|
||||
| `/replication` | Metrics related to site replication. |
|
||||
|
||||
#### `/replication`
|
||||
|
||||
| Name | Description | Labels |
|
||||
|---------------------------------------------------|---------------------------------------------------------------------------------------------|----------|
|
||||
| `minio_replication_average_active_workers` | Average number of active replication workers. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_average_queued_bytes` | Average number of bytes queued for replication since server start. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_average_queued_count` | Average number of objects queued for replication since server start. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_average_data_transfer_rate` | Average replication data transfer rate in bytes/sec. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_current_active_workers` | Total number of active replication workers. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_current_data_transfer_rate` | Current replication data transfer rate in bytes/sec. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_last_minute_queued_bytes` | Number of bytes queued for replication in the last full minute. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_last_minute_queued_count` | Number of objects queued for replication in the last full minute. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_max_active_workers` | Maximum number of active replication workers seen since server start. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_max_queued_bytes` | Maximum number of bytes queued for replication since server start. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_max_queued_count` | Maximum number of objects queued for replication since server start. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_max_data_transfer_rate` | Maximum replication data transfer rate in bytes/sec since server start. <br><br>Type: gauge | `server` |
|
||||
| `minio_replication_recent_backlog_count` | Total number of objects seen in replication backlog in the last 5 minutes <br><br>Type: gauge | `server` |
|
||||

#### `/bucket/replication`

| Name | Description | Labels |
|---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|
| `minio_bucket_replication_last_hour_failed_bytes` | Total number of bytes on a bucket which failed to replicate at least once in the last hour. <br><br>Type: gauge | `bucket`, `server` |
| `minio_bucket_replication_last_hour_failed_count` | Total number of objects on a bucket which failed to replicate in the last hour. <br><br>Type: gauge | `bucket`, `server` |
| `minio_bucket_replication_last_minute_failed_bytes` | Total number of bytes on a bucket which failed to replicate at least once in the last full minute. <br><br>Type: gauge | `bucket`, `server` |
| `minio_bucket_replication_last_minute_failed_count` | Total number of objects on a bucket which failed to replicate in the last full minute. <br><br>Type: gauge | `bucket`, `server` |
| `minio_bucket_replication_latency_ms` | Replication latency on a bucket in milliseconds. <br><br>Type: gauge | `bucket`, `operation`, `range`, `targetArn`, `server` |
| `minio_bucket_replication_proxied_delete_tagging_requests_failures` | Number of failures in DELETE tagging requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_delete_tagging_requests_total` | Number of DELETE tagging requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_get_requests_failures` | Number of failures in GET requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_get_requests_total` | Number of GET requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_get_tagging_requests_failures` | Number of failures in GET tagging requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_get_tagging_requests_total` | Number of GET tagging requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_head_requests_failures` | Number of failures in HEAD requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_head_requests_total` | Number of HEAD requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_put_tagging_requests_failures` | Number of failures in PUT tagging requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_proxied_put_tagging_requests_total` | Number of PUT tagging requests proxied to the replication target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_sent_bytes` | Total number of bytes replicated to the target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_sent_count` | Total number of objects replicated to the target. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_total_failed_bytes` | Total number of bytes that failed to replicate at least once since server start. <br><br>Type: counter | `bucket`, `server` |
| `minio_bucket_replication_total_failed_count` | Total number of objects that failed to replicate since server start. <br><br>Type: counter | `bucket`, `server` |
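
For a quick spot check outside Prometheus, a metric group can be scraped directly with `curl`. The sketch below is a minimal example under a few assumptions: the server runs at `localhost:9000`, metrics are served under the v3 base path `/minio/metrics/v3`, metrics auth is disabled (`MINIO_PROMETHEUS_AUTH_TYPE=public`), and bucket-level groups accept a `buckets` query parameter naming the buckets to report on; `mybucket` is a placeholder. Authenticated deployments would need to pass a bearer token instead.

```
# Hypothetical spot check: fetch replication metrics for "mybucket"
# and filter for last-hour failures. Adjust host, port, and bucket.
curl -s "http://localhost:9000/minio/metrics/v3/bucket/replication?buckets=mybucket" \
  | grep 'minio_bucket_replication_last_hour_failed'
```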

### Scanner metrics

Metrics about the MinIO scanner.

| Path | Description |
|------------|---------------------------------------|
| `/scanner` | Metrics related to the MinIO scanner. |

#### `/scanner`

| Name | Description | Labels |
|----------------------------------------|-------------------------------------------------------------------------------------|----------|
| `minio_scanner_bucket_scans_finished` | Total number of bucket scans completed since server start. <br><br>Type: counter | `server` |
| `minio_scanner_bucket_scans_started` | Total number of bucket scans started since server start. <br><br>Type: counter | `server` |
| `minio_scanner_directories_scanned` | Total number of directories scanned since server start. <br><br>Type: counter | `server` |
| `minio_scanner_last_activity_seconds` | Time elapsed (in seconds) since last scan activity. <br><br>Type: gauge | `server` |
| `minio_scanner_objects_scanned` | Total number of unique objects scanned since server start. <br><br>Type: counter | `server` |
| `minio_scanner_versions_scanned` | Total number of object versions scanned since server start. <br><br>Type: counter | `server` |
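
`minio_scanner_last_activity_seconds` is a useful liveness signal: a steadily growing value suggests the scanner has stalled. Below is a minimal sketch of an ad-hoc check, under the same hypothetical endpoint and access assumptions as the earlier example; the one-hour threshold is arbitrary.

```
# Hypothetical check: warn if the scanner has been idle for over an hour.
curl -s "http://localhost:9000/minio/metrics/v3/scanner" | awk '
  /^minio_scanner_last_activity_seconds/ && $2 > 3600 {
    print "scanner idle for " $2 " seconds: " $1
  }'
```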

### System metrics

Metrics about the MinIO process and the node.

| Path | Description |
|-----------------------------|----------------------------------------------------|
| `/system/cpu` | Metrics about CPUs on the system. |
| `/system/drive` | Metrics about drives on the system. |
| `/system/network/internode` | Metrics about internode requests made by the node. |
| `/system/memory` | Metrics about memory on the system. |
| `/system/process` | Standard process metrics. |
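
Each path above is relative to the metrics base endpoint, so individual groups can be scraped on their own rather than pulling every metric at once. A sketch, again assuming the hypothetical `localhost:9000` endpoint and public metrics access:

```
# Hypothetical examples; each group path is appended to the same base endpoint.
curl -s "http://localhost:9000/minio/metrics/v3/system/memory"
curl -s "http://localhost:9000/minio/metrics/v3/system/network/internode"
```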

#### `/system/drive`

| Name | Description | Labels |
|------------------------------------------------|-------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| `minio_system_drive_used_bytes` | Total storage used on a drive in bytes. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_free_bytes` | Total storage free on a drive in bytes. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_total_bytes` | Total storage available on a drive in bytes. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_used_inodes` | Total used inodes on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_free_inodes` | Total free inodes on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_total_inodes` | Total inodes available on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_timeout_errors_total` | Total timeout errors on a drive. <br><br>Type: counter | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_io_errors_total` | Total I/O errors on a drive. <br><br>Type: counter | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_availability_errors_total` | Total availability errors (I/O errors, timeouts) on a drive. <br><br>Type: counter | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_waiting_io` | Total waiting I/O operations on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_api_latency_micros` | Average last minute latency in µs for drive API storage operations. <br><br>Type: gauge | `drive`, `api`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_offline_count` | Count of offline drives. <br><br>Type: gauge | `pool_index`, `server` |
| `minio_system_drive_online_count` | Count of online drives. <br><br>Type: gauge | `pool_index`, `server` |
| `minio_system_drive_count` | Count of all drives. <br><br>Type: gauge | `pool_index`, `server` |
| `minio_system_drive_health` | Drive health (0 = offline, 1 = healthy, 2 = healing). <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_reads_per_sec` | Reads per second on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_reads_kb_per_sec` | Kilobytes read per second on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_reads_await` | Average time for read requests served on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_writes_per_sec` | Writes per second on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_writes_kb_per_sec` | Kilobytes written per second on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_writes_await` | Average time for write requests served on a drive. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
| `minio_system_drive_perc_util` | Percentage of time the disk was busy. <br><br>Type: gauge | `drive`, `set_index`, `drive_index`, `pool_index`, `server` |
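
Because used and total bytes are reported per drive with matching labels, a per-drive fill percentage can be derived at scrape time. A minimal sketch under the same assumptions as the earlier examples; it also assumes the exposition-format label block contains no embedded spaces:

```
# Hypothetical check: print each drive's used-space percentage.
curl -s "http://localhost:9000/minio/metrics/v3/system/drive" | awk '
  /^minio_system_drive_used_bytes\{/  { k = $1; sub(/^[^{]*/, "", k); used[k]  = $2 }
  /^minio_system_drive_total_bytes\{/ { k = $1; sub(/^[^{]*/, "", k); total[k] = $2 }
  END {
    for (k in used)
      if (total[k] > 0)
        printf "%s %.1f%% used\n", k, 100 * used[k] / total[k]
  }'
```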

#### `/system/memory`

| Name | Description | Labels |
|----------------------------------|---------------------------------------------------------|----------|
| `minio_system_memory_used` | Used memory on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_used_perc` | Used memory percentage on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_free` | Free memory on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_total` | Total memory on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_buffers` | Buffers memory on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_cache` | Cache memory on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_shared` | Shared memory on the node. <br><br>Type: gauge | `server` |
| `minio_system_memory_available` | Available memory on the node. <br><br>Type: gauge | `server` |

#### `/system/cpu`

| Name | Description | Labels |
|-------------------------------|-----------------------------------------------------------------|----------|
| `minio_system_cpu_avg_idle` | Average CPU idle time. <br><br>Type: gauge | `server` |
| `minio_system_cpu_avg_iowait` | Average CPU IOWait time. <br><br>Type: gauge | `server` |
| `minio_system_cpu_load` | 1-minute CPU load average. <br><br>Type: gauge | `server` |
| `minio_system_cpu_load_perc` | 1-minute CPU load average, as a percentage. <br><br>Type: gauge | `server` |
| `minio_system_cpu_nice` | CPU nice time. <br><br>Type: gauge | `server` |
| `minio_system_cpu_steal` | CPU steal time. <br><br>Type: gauge | `server` |
| `minio_system_cpu_system` | CPU system time. <br><br>Type: gauge | `server` |
| `minio_system_cpu_user` | CPU user time. <br><br>Type: gauge | `server` |

#### `/system/network/internode`

| Name | Description | Labels |
|------------------------------------------------------|--------------------------------------------------------------------------------|------------------------|
| `minio_system_network_internode_errors_total` | Total number of failed internode calls. <br><br>Type: counter | `server`, `pool_index` |
| `minio_system_network_internode_dial_errors_total` | Total number of internode TCP dial timeouts and errors. <br><br>Type: counter | `server`, `pool_index` |
| `minio_system_network_internode_dial_avg_time_nanos` | Average dial time of internode TCP calls in nanoseconds. <br><br>Type: gauge | `server`, `pool_index` |
| `minio_system_network_internode_sent_bytes_total` | Total number of bytes sent to other peer nodes. <br><br>Type: counter | `server`, `pool_index` |
| `minio_system_network_internode_recv_bytes_total` | Total number of bytes received from other peer nodes. <br><br>Type: counter | `server`, `pool_index` |

#### `/system/process`

| Name | Description | Labels |
|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|----------|
| `minio_system_process_locks_read_total` | Number of current READ locks on this peer. <br><br>Type: gauge | `server` |
| `minio_system_process_locks_write_total` | Number of current WRITE locks on this peer. <br><br>Type: gauge | `server` |
| `minio_system_process_cpu_total_seconds` | Total user and system CPU time spent in seconds. <br><br>Type: counter | `server` |
| `minio_system_process_go_routine_total` | Total number of goroutines running. <br><br>Type: gauge | `server` |
| `minio_system_process_io_rchar_bytes` | Total bytes read by the process from the underlying storage system, including cache (`rchar` in `/proc/[pid]/io`). <br><br>Type: counter | `server` |
| `minio_system_process_io_read_bytes` | Total bytes read by the process from the underlying storage system (`read_bytes` in `/proc/[pid]/io`). <br><br>Type: counter | `server` |
| `minio_system_process_io_wchar_bytes` | Total bytes written by the process to the underlying storage system, including page cache (`wchar` in `/proc/[pid]/io`). <br><br>Type: counter | `server` |
| `minio_system_process_io_write_bytes` | Total bytes written by the process to the underlying storage system (`write_bytes` in `/proc/[pid]/io`). <br><br>Type: counter | `server` |
| `minio_system_process_start_time_seconds` | Start time for MinIO process in seconds since Unix epoch. <br><br>Type: gauge | `server` |
| `minio_system_process_uptime_seconds` | Uptime for MinIO process in seconds. <br><br>Type: gauge | `server` |
| `minio_system_process_file_descriptor_limit_total` | Limit on total number of open file descriptors for the MinIO Server process. <br><br>Type: gauge | `server` |
| `minio_system_process_file_descriptor_open_total` | Total number of open file descriptors by the MinIO Server process. <br><br>Type: gauge | `server` |
| `minio_system_process_syscall_read_total` | Total read syscalls to the kernel (`syscr` in `/proc/[pid]/io`). <br><br>Type: counter | `server` |
| `minio_system_process_syscall_write_total` | Total write syscalls to the kernel (`syscw` in `/proc/[pid]/io`). <br><br>Type: counter | `server` |
| `minio_system_process_resident_memory_bytes` | Resident memory size in bytes. <br><br>Type: gauge | `server` |
| `minio_system_process_virtual_memory_bytes` | Virtual memory size in bytes. <br><br>Type: gauge | `server` |
| `minio_system_process_virtual_memory_max_bytes` | Maximum virtual memory size in bytes. <br><br>Type: gauge | `server` |
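
The two file descriptor gauges pair naturally: comparing open descriptors against the limit gives early warning before the process exhausts them. A minimal sketch under the same assumptions as the earlier examples; the 80% threshold is arbitrary.

```
# Hypothetical check: warn when over 80% of file descriptors are in use.
curl -s "http://localhost:9000/minio/metrics/v3/system/process" | awk '
  /^minio_system_process_file_descriptor_open_total/  { open  = $2 }
  /^minio_system_process_file_descriptor_limit_total/ { limit = $2 }
  END {
    if (limit > 0 && open / limit > 0.8)
      printf "fd usage high: %d of %d open\n", open, limit
  }'
```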