Merge branch 'main' into release-3.4
CI / Go tests (push) Has been cancelled
CI / More Go tests (push) Has been cancelled
CI / Go tests with previous Go version (push) Has been cancelled
CI / UI tests (push) Has been cancelled
CI / Go tests on Windows (push) Has been cancelled
CI / Mixins tests (push) Has been cancelled
CI / Build Prometheus for common architectures (0) (push) Has been cancelled
CI / Build Prometheus for common architectures (1) (push) Has been cancelled
CI / Build Prometheus for common architectures (2) (push) Has been cancelled
CI / Build Prometheus for all architectures (0) (push) Has been cancelled
CI / Build Prometheus for all architectures (1) (push) Has been cancelled
CI / Build Prometheus for all architectures (2) (push) Has been cancelled
CI / Build Prometheus for all architectures (3) (push) Has been cancelled
CI / Build Prometheus for all architectures (4) (push) Has been cancelled
CI / Build Prometheus for all architectures (5) (push) Has been cancelled
CI / Build Prometheus for all architectures (6) (push) Has been cancelled
CI / Build Prometheus for all architectures (7) (push) Has been cancelled
CI / Build Prometheus for all architectures (8) (push) Has been cancelled
CI / Build Prometheus for all architectures (9) (push) Has been cancelled
CI / Build Prometheus for all architectures (10) (push) Has been cancelled
CI / Build Prometheus for all architectures (11) (push) Has been cancelled
CI / Check generated parser (push) Has been cancelled
CI / golangci-lint (push) Has been cancelled
CI / fuzzing (push) Has been cancelled
CI / codeql (push) Has been cancelled
CI / Report status of build Prometheus for all architectures (push) Has been cancelled
CI / Publish main branch artifacts (push) Has been cancelled
CI / Publish release artefacts (push) Has been cancelled
CI / Publish UI on npm Registry (push) Has been cancelled
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
commit 8c7fc100a9
@@ -18,8 +18,8 @@ jobs:
- uses: ./.github/promci/actions/setup_environment
with:
enable_npm: true
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
- run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test ./tsdb/ -test.tsdb-isolation=false
- run: make -C documentation/examples/remote_storage
- run: make -C documentation/examples

@@ -206,7 +206,7 @@ jobs:
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
version: v2.0.2
version: v2.1.5
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'

@@ -254,7 +254,7 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7
- name: Install nodejs
uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
@@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Initialize CodeQL
uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13
uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13
uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13
uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16

@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # tag=v3.28.13
uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # tag=v3.28.16
with:
sarif_file: results.sarif
@@ -2,6 +2,7 @@ formatters:
enable:
- gci
- gofumpt
- goimports
settings:
gci:
sections:

@@ -10,10 +11,9 @@ formatters:
- prefix(github.com/prometheus/prometheus)
gofumpt:
extra-rules: true
exclusions:
paths:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
goimports:
local-prefixes:
- github.com/prometheus/prometheus

issues:
max-issues-per-linter: 0

@@ -72,9 +72,7 @@ linters:
- linters:
- godot
source: "^// ==="
- linters:
- perfsprint
text: "fmt.Sprintf can be replaced with string concatenation"
warn-unused: true
settings:
depguard:
rules:

@@ -164,34 +162,10 @@ linters:
- name: unused-parameter
- name: var-declaration
- name: var-naming
staticcheck:
checks:
- all # Enable all checks.
# FIXME: We should enable this check once we have fixed all the issues.
- -QF1001
- -QF1002
- -QF1003
- -QF1006
- -QF1007
- -QF1008
- -QF1009
- -QF1010
- -QF1012
- -ST1000
- -ST1003
- -ST1005
- -ST1012
- -ST1016
- -ST1020
testifylint:
disable:
- empty # FIXME
- equal-values # FIXME
- float-compare
- formatter # FIXME
- go-require
- len # FIXME
- useless-assert # FIXME: wait for golangci-lint > v2.0.2
enable-all: true

output:
@@ -14,10 +14,8 @@ build:
all:
- netgo
- builtinassets
- stringlabels
windows:
- builtinassets
- stringlabels
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
-X github.com/prometheus/common/version.Revision={{.Revision}}
@@ -61,7 +61,8 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v2.0.2
GOLANGCI_LINT_VERSION ?= v2.1.5
GOLANGCI_FMT_OPTS ?=
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -156,9 +157,13 @@ $(GOTEST_DIR):
@mkdir -p $@

.PHONY: common-format
common-format:
common-format: $(GOLANGCI_LINT)
@echo ">> formatting code"
$(GO) fmt $(pkgs)
ifdef GOLANGCI_LINT
@echo ">> formatting code with golangci-lint"
$(GOLANGCI_LINT) fmt $(GOLANGCI_FMT_OPTS)
endif

.PHONY: common-vet
common-vet:

@@ -248,8 +253,8 @@ $(PROMU):
cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
rm -r $(PROMU_TMP)

.PHONY: proto
proto:
.PHONY: common-proto
common-proto:
@echo ">> generating code from proto files"
@./scripts/genproto.sh
@@ -14,7 +14,8 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/
| v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) |
| v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) |
| v3.4 | 2025-04-29 | Jan-Otto Kröpke (Github: @jkroepke)|
| v3.5 | 2025-06-03 | **volunteer welcome** |
| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) |
| v3.6 | 2025-07-15 | **volunteer welcome** |

If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@@ -30,6 +30,7 @@ import (
goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package.
"runtime"
"runtime/debug"
"slices"
"strconv"
"strings"
"sync"

@@ -651,6 +652,32 @@ func main() {
cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
}

// Set Go runtime parameters before we get too far into initialization.
updateGoGC(cfgFile, logger)
if cfg.maxprocsEnable {
l := func(format string, a ...interface{}) {
logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
}
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
}
}

if cfg.memlimitEnable {
if _, err := memlimit.SetGoMemLimitWithOpts(
memlimit.WithRatio(cfg.memlimitRatio),
memlimit.WithProvider(
memlimit.ApplyFallback(
memlimit.FromCgroup,
memlimit.FromSystem,
),
),
memlimit.WithLogger(logger.With("component", "automemlimit")),
); err != nil {
logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
}
}

// Now that the validity of the config is established, set the config
// success metrics accordingly, although the config isn't really loaded
// yet. This will happen later (including setting these metrics again),

@@ -801,29 +828,6 @@ func main() {
ruleManager *rules.Manager
)

if cfg.maxprocsEnable {
l := func(format string, a ...interface{}) {
logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
}
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
}
}

if cfg.memlimitEnable {
if _, err := memlimit.SetGoMemLimitWithOpts(
memlimit.WithRatio(cfg.memlimitRatio),
memlimit.WithProvider(
memlimit.ApplyFallback(
memlimit.FromCgroup,
memlimit.FromSystem,
),
),
); err != nil {
logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
}
}

if !agentMode {
opts := promql.EngineOpts{
Logger: logger.With("component", "query engine"),

@@ -1509,6 +1513,14 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
}

updateGoGC(conf, logger)

noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
return nil
}

func updateGoGC(conf *config.Config, logger *slog.Logger) {
oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
if oldGoGC != conf.Runtime.GoGC {
logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)

@@ -1519,10 +1531,6 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg
} else {
os.Setenv("GOGC", "off")
}

noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
return nil
}

func startsOrEndsWithQuote(s string) bool {
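The hunks above move GOGC handling into a single updateGoGC call that runs both at startup and on every config reload. The net precedence is: an explicit runtime.gogc in the configuration wins, otherwise a GOGC environment variable is honoured, otherwise Prometheus falls back to its default of 75. The sketch below is not part of the commit; resolveGoGC is a hypothetical helper that only illustrates that precedence.

package main

import (
	"fmt"
	"os"
	"runtime/debug"
	"strconv"
)

// resolveGoGC illustrates the precedence implemented by the diff:
// config value > GOGC env var > default of 75.
func resolveGoGC(configured *int) int {
	if configured != nil {
		return *configured // runtime.gogc from the config file wins.
	}
	if env := os.Getenv("GOGC"); env != "" {
		if env == "off" {
			return -1 // same convention as the Go runtime.
		}
		if v, err := strconv.Atoi(env); err == nil {
			return v
		}
	}
	return 75 // the DefaultGoGCPercentage constant introduced in config.go.
}

func main() {
	// SetGCPercent returns the previous setting; the real code logs old vs. new.
	old := debug.SetGCPercent(resolveGoGC(nil))
	fmt.Println("previous GOGC:", old)
}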
@@ -1914,10 +1922,8 @@ func (p *rwProtoMsgFlagParser) Set(opt string) error {
if err := t.Validate(); err != nil {
return err
}
for _, prev := range *p.msgs {
if prev == t {
return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
}
if slices.Contains(*p.msgs, t) {
return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
}
*p.msgs = append(*p.msgs, t)
return nil
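The hunk above is a straight swap of a hand-written duplicate check for the standard library's slices.Contains (Go 1.21+). A self-contained sketch of the same pattern; protoMsg here is an illustrative stand-in, not the actual type from the flag parser.

package main

import (
	"fmt"
	"slices"
)

// protoMsg stands in for the remote-write protobuf message type.
type protoMsg string

// addUnique mirrors the refactor: slices.Contains replaces the loop that
// rejected flag values which were already present.
func addUnique(msgs []protoMsg, m protoMsg) ([]protoMsg, error) {
	if slices.Contains(msgs, m) {
		return msgs, fmt.Errorf("duplicated %v flag value, got %v already", m, msgs)
	}
	return append(msgs, m), nil
}

func main() {
	msgs := []protoMsg{"prometheus.WriteRequest"}
	msgs, err := addUnique(msgs, "prometheus.WriteRequest")
	fmt.Println(msgs, err) // the duplicate is rejected
}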
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"math"
"net/http"
"os"
"os/exec"
"path/filepath"

@@ -33,6 +34,7 @@ import (
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"

@@ -41,6 +43,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/util/testutil"
)

func init() {

@@ -268,7 +271,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
require.Fail(t, "prometheus should be still running: %v", err)
t.Fatalf("prometheus should be still running: %v", err)
case <-time.After(startupTime):
prom.Process.Kill()
<-done

@@ -332,7 +335,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
require.Fail(t, "prometheus should be still running: %v", err)
t.Fatalf("prometheus should be still running: %v", err)
case <-time.After(startupTime):
prom.Process.Kill()
<-done

@@ -646,3 +649,118 @@ func TestRwProtoMsgFlagParser(t *testing.T) {
})
}
}

func getGaugeValue(t *testing.T, body io.ReadCloser, metricName string) (float64, error) {
t.Helper()

p := expfmt.TextParser{}
metricFamilies, err := p.TextToMetricFamilies(body)
if err != nil {
return 0, err
}
metricFamily, ok := metricFamilies[metricName]
if !ok {
return 0, errors.New("metric family not found")
}
metric := metricFamily.GetMetric()
if len(metric) != 1 {
return 0, errors.New("metric not found")
}
return metric[0].GetGauge().GetValue(), nil
}

func TestRuntimeGOGCConfig(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
t.Parallel()

for _, tc := range []struct {
name string
config string
gogcEnvVar string
expectedGOGC int
}{
{
name: "empty config file",
expectedGOGC: 75,
},
{
name: "empty config file with GOGC env var set",
gogcEnvVar: "66",
expectedGOGC: 66,
},
{
name: "gogc set through config",
config: `
runtime:
gogc: 77`,
expectedGOGC: 77,
},
{
name: "gogc set through config and env var",
config: `
runtime:
gogc: 77`,
gogcEnvVar: "88",
expectedGOGC: 77,
},
{
name: "incomplete runtime block",
config: `
runtime:`,
expectedGOGC: 75,
},
{
name: "incomplete runtime block and GOGC env var set",
config: `
runtime:`,
gogcEnvVar: "88",
expectedGOGC: 88,
},
{
name: "unrelated config and GOGC env var set",
config: `
global:
scrape_interval: 500ms`,
gogcEnvVar: "80",
expectedGOGC: 80,
},
} {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()

tmpDir := t.TempDir()
configFile := filepath.Join(tmpDir, "prometheus.yml")

port := testutil.RandomUnprivilegedPort(t)
os.WriteFile(configFile, []byte(tc.config), 0o777)
prom := prometheusCommandWithLogging(t, configFile, port, fmt.Sprintf("--storage.tsdb.path=%s", tmpDir))
// Inject GOGC when set.
prom.Env = os.Environ()
if tc.gogcEnvVar != "" {
prom.Env = append(prom.Env, fmt.Sprintf("GOGC=%s", tc.gogcEnvVar))
}
require.NoError(t, prom.Start())

var (
r *http.Response
err error
)
// Wait for the /metrics endpoint to be ready.
require.Eventually(t, func() bool {
r, err = http.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", port))
if err != nil {
return false
}
return r.StatusCode == http.StatusOK
}, 5*time.Second, 50*time.Millisecond)
defer r.Body.Close()

// Check the final GOGC that's set, consider go_gc_gogc_percent from /metrics as source of truth.
gogc, err := getGaugeValue(t, r.Body, "go_gc_gogc_percent")
require.NoError(t, err)
require.Equal(t, float64(tc.expectedGOGC), gogc)
})
}
}
@@ -88,7 +88,7 @@ func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
_, err = p.configFile.Seek(0, 0)
require.NoError(t, err)
if queryLogFile != "" {
_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
_, err = fmt.Fprintf(p.configFile, "global:\n query_log_file: %s\n", queryLogFile)
require.NoError(t, err)
}
_, err = p.configFile.Write([]byte(p.configuration()))

@@ -119,7 +119,8 @@ func runTestSteps(t *testing.T, steps []struct {
require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file")

port := testutil.RandomUnprivilegedPort(t)
runPrometheusWithLogging(t, configFilePath, port)
prom := prometheusCommandWithLogging(t, configFilePath, port, "--enable-feature=auto-reload-config", "--config.auto-reload-interval=1s")
require.NoError(t, prom.Start())

baseURL := "http://localhost:" + strconv.Itoa(port)
require.Eventually(t, func() bool {

@@ -197,14 +198,20 @@ func captureLogsToTLog(t *testing.T, r io.Reader) {
}
}

func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) {
func prometheusCommandWithLogging(t *testing.T, configFilePath string, port int, extraArgs ...string) *exec.Cmd {
stdoutPipe, stdoutWriter := io.Pipe()
stderrPipe, stderrWriter := io.Pipe()

var wg sync.WaitGroup
wg.Add(2)

prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port))
args := []string{
"-test.main",
"--config.file=" + configFilePath,
"--web.listen-address=0.0.0.0:" + strconv.Itoa(port),
}
args = append(args, extraArgs...)
prom := exec.Command(promPath, args...)
prom.Stdout = stdoutWriter
prom.Stderr = stderrWriter

@@ -224,6 +231,5 @@ func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) {
stderrWriter.Close()
wg.Wait()
})

require.NoError(t, prom.Start())
return prom
}
@@ -257,15 +257,15 @@ func main() {
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
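The clarified help text above states that the dump bounds are milliseconds since the Unix epoch. A small sketch of producing such a value in Go with the standard library, for anyone scripting around promtool; the chosen date is only an example.

package main

import (
	"fmt"
	"time"
)

func main() {
	// --min-time / --max-time for `promtool tsdb dump` take millisecond
	// Unix timestamps; time.Time.UnixMilli produces exactly that.
	cutoff := time.Date(2025, time.April, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(cutoff.UnixMilli()) // 1743465600000
}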
@@ -510,7 +510,7 @@ func TestCheckRules(t *testing.T) {
os.Stdin = r

exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
require.Equal(t, successExitCode, exitCode, "")
require.Equal(t, successExitCode, exitCode)
})

t.Run("rules-bad", func(t *testing.T) {

@@ -532,7 +532,7 @@ func TestCheckRules(t *testing.T) {
os.Stdin = r

exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
require.Equal(t, failureExitCode, exitCode, "")
require.Equal(t, failureExitCode, exitCode)
})

t.Run("rules-lint-fatal", func(t *testing.T) {

@@ -554,7 +554,7 @@ func TestCheckRules(t *testing.T) {
os.Stdin = r

exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false))
require.Equal(t, lintErrExitCode, exitCode, "")
require.Equal(t, lintErrExitCode, exitCode)
})
}

@@ -572,19 +572,19 @@ func TestCheckRulesWithRuleFiles(t *testing.T) {
t.Run("rules-good", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml")
require.Equal(t, successExitCode, exitCode, "")
require.Equal(t, successExitCode, exitCode)
})

t.Run("rules-bad", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml")
require.Equal(t, failureExitCode, exitCode, "")
require.Equal(t, failureExitCode, exitCode)
})

t.Run("rules-lint-fatal", func(t *testing.T) {
t.Parallel()
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml")
require.Equal(t, lintErrExitCode, exitCode, "")
require.Equal(t, lintErrExitCode, exitCode)
})
}
@@ -0,0 +1,43 @@
# Minimal test case to see that fuzzy compare is working as expected.
# It should allow slight floating point differences through. Larger
# floating point differences should still fail.

evaluation_interval: 1m
fuzzy_compare: true

tests:
- name: correct fuzzy match
input_series:
- series: test_low
values: 2.9999999999999996
- series: test_high
values: 3.0000000000000004
promql_expr_test:
- expr: test_low
eval_time: 0
exp_samples:
- labels: test_low
value: 3
- expr: test_high
eval_time: 0
exp_samples:
- labels: test_high
value: 3

- name: wrong fuzzy match
input_series:
- series: test_low
values: 2.9999999999999987
- series: test_high
values: 3.0000000000000013
promql_expr_test:
- expr: test_low
eval_time: 0
exp_samples:
- labels: test_low
value: 3
- expr: test_high
eval_time: 0
exp_samples:
- labels: test_high
value: 3

@@ -0,0 +1,24 @@
# Minimal test case to see that fuzzy compare can be turned off,
# and slight floating point differences fail matching.

evaluation_interval: 1m
fuzzy_compare: false

tests:
- name: correct fuzzy match
input_series:
- series: test_low
values: 2.9999999999999996
- series: test_high
values: 3.0000000000000004
promql_expr_test:
- expr: test_low
eval_time: 0
exp_samples:
- labels: test_low
value: 3
- expr: test_high
eval_time: 0
exp_samples:
- labels: test_high
value: 3
@@ -552,7 +552,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten

postingInfos = postingInfos[:0]
for _, n := range allLabelNames {
values, err := ir.SortedLabelValues(ctx, n, selectors...)
values, err := ir.SortedLabelValues(ctx, n, nil, selectors...)
if err != nil {
return err
}

@@ -568,7 +568,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten

postingInfos = postingInfos[:0]
for _, n := range allLabelNames {
lv, err := ir.SortedLabelValues(ctx, n, selectors...)
lv, err := ir.SortedLabelValues(ctx, n, nil, selectors...)
if err != nil {
return err
}

@@ -578,7 +578,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
printInfo(postingInfos)

postingInfos = postingInfos[:0]
lv, err := ir.SortedLabelValues(ctx, "__name__", selectors...)
lv, err := ir.SortedLabelValues(ctx, "__name__", nil, selectors...)
if err != nil {
return err
}
@ -19,6 +19,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
@ -130,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
|
|||
if t.Interval == 0 {
|
||||
t.Interval = unitTestInp.EvaluationInterval
|
||||
}
|
||||
ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.RuleFiles...)
|
||||
ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.FuzzyCompare, unitTestInp.RuleFiles...)
|
||||
if ers != nil {
|
||||
for _, e := range ers {
|
||||
tc.Fail(e.Error())
|
||||
|
@ -159,6 +160,7 @@ type unitTestFile struct {
|
|||
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
|
||||
GroupEvalOrder []string `yaml:"group_eval_order"`
|
||||
Tests []testGroup `yaml:"tests"`
|
||||
FuzzyCompare bool `yaml:"fuzzy_compare,omitempty"`
|
||||
}
|
||||
|
||||
// resolveAndGlobFilepaths joins all relative paths in a configuration
|
||||
|
@ -197,7 +199,7 @@ type testGroup struct {
|
|||
}
|
||||
|
||||
// test performs the unit tests.
|
||||
func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields bool, ruleFiles ...string) (outErr []error) {
|
||||
func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields, fuzzyCompare bool, ruleFiles ...string) (outErr []error) {
|
||||
if debug {
|
||||
testStart := time.Now()
|
||||
fmt.Printf("DEBUG: Starting test %s\n", testname)
|
||||
|
@ -237,6 +239,14 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
|
|||
mint := time.Unix(0, 0).UTC()
|
||||
maxt := mint.Add(tg.maxEvalTime())
|
||||
|
||||
// Optional floating point compare fuzzing.
|
||||
var compareFloat64 cmp.Option = cmp.Options{}
|
||||
if fuzzyCompare {
|
||||
compareFloat64 = cmp.Comparer(func(x, y float64) bool {
|
||||
return x == y || math.Nextafter(x, math.Inf(-1)) == y || math.Nextafter(x, math.Inf(1)) == y
|
||||
})
|
||||
}
|
||||
|
||||
// Pre-processing some data for testing alerts.
|
||||
// All this preparation is so that we can test alerts as we evaluate the rules.
|
||||
// This avoids storing them in memory, as the number of evals might be high.
|
||||
|
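The comparer added above treats two floats as equal when they are identical or exactly one ULP apart (math.Nextafter in either direction). A self-contained sketch of how that option behaves with go-cmp, assuming the github.com/google/go-cmp module is available; the example values mirror the new testdata files.

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Same comparer as in the diff: equal if identical or one ULP apart.
	fuzzy := cmp.Comparer(func(x, y float64) bool {
		return x == y || math.Nextafter(x, math.Inf(-1)) == y || math.Nextafter(x, math.Inf(1)) == y
	})

	fmt.Println(cmp.Equal(3.0, 3.0000000000000004, fuzzy)) // true: one ULP away
	fmt.Println(cmp.Equal(3.0, 3.0000000000000013, fuzzy)) // false: several ULPs away
}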
@@ -311,12 +321,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
return errs
}

for {
if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) {
break
}

for curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint) {
// We need to check alerts for this time.
// If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval'
// then we compare alerts with the Eval at `ts`.

@@ -374,7 +380,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
sort.Sort(gotAlerts)
sort.Sort(expAlerts)

if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal)) {
if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal), compareFloat64) {
var testName string
if tg.TestGroupName != "" {
testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName)

@@ -482,7 +488,7 @@ Outer:
sort.Slice(gotSamples, func(i, j int) bool {
return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
})
if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal)) {
if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal), compareFloat64) {
errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr,
testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
}

@@ -240,6 +240,29 @@ func TestRulesUnitTestRun(t *testing.T) {
ignoreUnknownFields: true,
want: 0,
},
{
name: "Test precise floating point comparison expected failure",
args: args{
files: []string{"./testdata/rules_run_no_fuzzy.yml"},
},
want: 1,
},
{
name: "Test fuzzy floating point comparison correct match",
args: args{
run: []string{"correct"},
files: []string{"./testdata/rules_run_fuzzy.yml"},
},
want: 0,
},
{
name: "Test fuzzy floating point comparison wrong match",
args: args{
run: []string{"wrong"},
files: []string{"./testdata/rules_run_fuzzy.yml"},
},
want: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -21,6 +21,7 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"

@@ -169,23 +170,23 @@ var (
// changes to DefaultNativeHistogramScrapeProtocols.
ScrapeProtocols: DefaultScrapeProtocols,
ConvertClassicHistogramsToNHCB: false,
AlwaysScrapeClassicHistograms: false,
}

DefaultRuntimeConfig = RuntimeConfig{
// Go runtime tuning.
GoGC: 75,
GoGC: getGoGC(),
}

// DefaultScrapeConfig is the default scrape configuration.
DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
AlwaysScrapeClassicHistograms: false,
MetricsPath: "/metrics",
Scheme: "http",
HonorLabels: false,
HonorTimestamps: true,
HTTPClientConfig: config.DefaultHTTPClientConfig,
EnableCompression: true,
// ScrapeTimeout, ScrapeInterval, ScrapeProtocols, AlwaysScrapeClassicHistograms, and ConvertClassicHistogramsToNHCB default to the configured globals.
MetricsPath: "/metrics",
Scheme: "http",
HonorLabels: false,
HonorTimestamps: true,
HTTPClientConfig: config.DefaultHTTPClientConfig,
EnableCompression: true,
}

// DefaultAlertmanagerConfig is the default alertmanager configuration.

@@ -384,8 +385,6 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
// We have to restore it here.
if c.Runtime.isZero() {
c.Runtime = DefaultRuntimeConfig
// Use the GOGC env var value if the runtime section is empty.
c.Runtime.GoGC = getGoGCEnv()
}

for _, rf := range c.RuleFiles {

@@ -489,6 +488,8 @@ type GlobalConfig struct {
MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"`
// Whether to convert all scraped classic histograms into native histograms with custom buckets.
ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
}

// ScrapeProtocol represents supported protocol for scraping metrics.

@@ -645,9 +646,12 @@ func (c *GlobalConfig) isZero() bool {
c.QueryLogFile == "" &&
c.ScrapeFailureLogFile == "" &&
c.ScrapeProtocols == nil &&
!c.ConvertClassicHistogramsToNHCB
!c.ConvertClassicHistogramsToNHCB &&
!c.AlwaysScrapeClassicHistograms
}

const DefaultGoGCPercentage = 75

// RuntimeConfig configures the values for the process behavior.
type RuntimeConfig struct {
// The Go garbage collection target percentage.
@@ -690,7 +694,7 @@ type ScrapeConfig struct {
// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
AlwaysScrapeClassicHistograms *bool `yaml:"always_scrape_classic_histograms,omitempty"`
// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
ConvertClassicHistogramsToNHCB *bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
// File to which scrape failures are logged.

@@ -904,6 +908,11 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
c.ConvertClassicHistogramsToNHCB = &global
}

if c.AlwaysScrapeClassicHistograms == nil {
global := globalConfig.AlwaysScrapeClassicHistograms
c.AlwaysScrapeClassicHistograms = &global
}

return nil
}

@@ -931,6 +940,11 @@ func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool {
return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB
}

// AlwaysScrapeClassicHistogramsEnabled returns whether to always scrape classic histograms.
func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool {
return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms
}

// StorageConfig configures runtime reloadable configuration options.
type StorageConfig struct {
TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"`
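Changing AlwaysScrapeClassicHistograms from bool to *bool is what lets a scrape config distinguish "not set, inherit the global value" from an explicit false. A minimal sketch of that three-state pattern, with hypothetical stand-in types rather than the real config structs.

package main

import "fmt"

// Illustrative stand-ins for GlobalConfig / ScrapeConfig:
// nil means "field omitted in the scrape config, inherit the global value".
type globalCfg struct{ AlwaysScrapeClassic bool }
type scrapeCfg struct{ AlwaysScrapeClassic *bool }

// validate mirrors the Validate step above: fill in the global on nil.
func (c *scrapeCfg) validate(g globalCfg) {
	if c.AlwaysScrapeClassic == nil {
		v := g.AlwaysScrapeClassic
		c.AlwaysScrapeClassic = &v
	}
}

func (c *scrapeCfg) enabled() bool {
	return c.AlwaysScrapeClassic != nil && *c.AlwaysScrapeClassic
}

func main() {
	c := scrapeCfg{} // field omitted in the scrape config
	c.validate(globalCfg{AlwaysScrapeClassic: true})
	fmt.Println(c.enabled()) // true, inherited from the global section
}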
@@ -1096,13 +1110,11 @@ func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error
return err
}

for _, supportedVersion := range SupportedAlertmanagerAPIVersions {
if *v == supportedVersion {
return nil
}
if !slices.Contains(SupportedAlertmanagerAPIVersions, *v) {
return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
}

return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v)
return nil
}

const (

@@ -1482,7 +1494,7 @@ func fileErr(filename string, err error) error {
return fmt.Errorf("%q: %w", filePath(filename), err)
}

func getGoGCEnv() int {
func getGoGC() int {
goGCEnv := os.Getenv("GOGC")
// If the GOGC env var is set, use the same logic as upstream Go.
if goGCEnv != "" {

@@ -1495,7 +1507,7 @@ func getGoGCEnv() int {
return i
}
}
return DefaultRuntimeConfig.GoGC
return DefaultGoGCPercentage
}

type translationStrategyOption string
@@ -102,6 +102,7 @@ var expectedConf = &Config{
LabelNameLengthLimit: globLabelNameLengthLimit,
LabelValueLengthLimit: globLabelValueLengthLimit,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
AlwaysScrapeClassicHistograms: false,
ConvertClassicHistogramsToNHCB: false,
},

@@ -223,6 +224,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: "testdata/fail_prom.log",
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -339,6 +341,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

HTTPClientConfig: config.HTTPClientConfig{

@@ -440,6 +443,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -499,6 +503,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: "/metrics",

@@ -536,6 +541,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -579,6 +585,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -622,6 +629,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -655,6 +663,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -696,6 +705,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -734,6 +744,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -779,6 +790,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -814,6 +826,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -852,6 +865,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -883,6 +897,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -917,6 +932,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: "/federate",

@@ -951,6 +967,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -985,6 +1002,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1016,6 +1034,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1055,6 +1074,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1093,6 +1113,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1128,6 +1149,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1162,6 +1184,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1200,6 +1223,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1241,6 +1265,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1301,6 +1326,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1332,6 +1358,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

HTTPClientConfig: config.DefaultHTTPClientConfig,

@@ -1374,6 +1401,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

HTTPClientConfig: config.DefaultHTTPClientConfig,

@@ -1422,6 +1450,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1461,6 +1490,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

HTTPClientConfig: config.DefaultHTTPClientConfig,

@@ -1495,6 +1525,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -1531,6 +1562,7 @@ var expectedConf = &Config{
ScrapeFailureLogFile: globScrapeFailureLogFile,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,
@@ -2393,13 +2425,23 @@ func TestEmptyGlobalBlock(t *testing.T) {
require.Equal(t, exp, *c)
}

// ScrapeConfigOptions contains options for creating a scrape config.
type ScrapeConfigOptions struct {
JobName string
ScrapeInterval model.Duration
ScrapeTimeout model.Duration
AlwaysScrapeClassicHistograms bool
ConvertClassicHistToNHCB bool
}

func TestGetScrapeConfigs(t *testing.T) {
sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration, convertClassicHistToNHCB bool) *ScrapeConfig {
// Helper function to create a scrape config with the given options.
sc := func(opts ScrapeConfigOptions) *ScrapeConfig {
return &ScrapeConfig{
JobName: jobName,
JobName: opts.JobName,
HonorTimestamps: true,
ScrapeInterval: scrapeInterval,
ScrapeTimeout: scrapeTimeout,
ScrapeInterval: opts.ScrapeInterval,
ScrapeTimeout: opts.ScrapeTimeout,
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,

@@ -2420,7 +2462,8 @@ func TestGetScrapeConfigs(t *testing.T) {
},
},
},
ConvertClassicHistogramsToNHCB: boolPtr(convertClassicHistToNHCB),
AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms),
ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB),
}
}

@@ -2433,20 +2476,20 @@ func TestGetScrapeConfigs(t *testing.T) {
{
name: "An included config file should be a valid global config.",
configFile: "testdata/scrape_config_files.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false)},
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
},
{
name: "A global config that only include a scrape config file.",
configFile: "testdata/scrape_config_files_only.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false)},
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
},
{
name: "A global config that combine scrape config files and scrape configs.",
configFile: "testdata/scrape_config_files_combined.good.yml",
expectedResult: []*ScrapeConfig{
sc("node", model.Duration(60*time.Second), model.Duration(10*time.Second), false),
sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false),
sc("alertmanager", model.Duration(60*time.Second), model.Duration(10*time.Second), false),
sc(ScrapeConfigOptions{JobName: "node", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}),
sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}),
sc(ScrapeConfigOptions{JobName: "alertmanager", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}),
},
},
{

@@ -2462,6 +2505,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

MetricsPath: DefaultScrapeConfig.MetricsPath,

@@ -2498,6 +2542,7 @@ func TestGetScrapeConfigs(t *testing.T) {
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
MetricNameValidationScheme: UTF8ValidationConfig,
MetricNameEscapingScheme: model.AllowUTF8,
AlwaysScrapeClassicHistograms: boolPtr(false),
ConvertClassicHistogramsToNHCB: boolPtr(false),

HTTPClientConfig: config.HTTPClientConfig{

@@ -2549,17 +2594,37 @@ func TestGetScrapeConfigs(t *testing.T) {
{
name: "A global config that enables convert classic histograms to nhcb.",
configFile: "testdata/global_convert_classic_hist_to_nhcb.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), true)},
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})},
},
{
name: "A global config that enables convert classic histograms to nhcb and scrape config that disables the conversion",
configFile: "testdata/local_disable_convert_classic_hist_to_nhcb.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false)},
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
},
{
name: "A global config that disables convert classic histograms to nhcb and scrape config that enables the conversion",
configFile: "testdata/local_convert_classic_hist_to_nhcb.good.yml",
expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), true)},
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})},
},
{
name: "A global config that enables always scrape classic histograms",
configFile: "testdata/global_enable_always_scrape_classic_hist.good.yml",
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})},
||||
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})},
|
||||
},
|
||||
{
|
||||
name: "A global config that disables always scrape classic histograms",
|
||||
configFile: "testdata/global_disable_always_scrape_classic_hist.good.yml",
|
||||
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
|
||||
},
|
||||
{
|
||||
name: "A global config that disables always scrape classic histograms and scrape config that enables it",
|
||||
configFile: "testdata/local_enable_always_scrape_classic_hist.good.yml",
|
||||
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})},
|
||||
},
|
||||
{
|
||||
name: "A global config that enables always scrape classic histograms and scrape config that disables it",
|
||||
configFile: "testdata/local_disable_always_scrape_classic_hist.good.yml",
|
||||
expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})},
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,6 @@
global:
  always_scrape_classic_histograms: false
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']

@ -0,0 +1,6 @@
global:
  always_scrape_classic_histograms: true
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']

@ -0,0 +1,7 @@
global:
  always_scrape_classic_histograms: true
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']
    always_scrape_classic_histograms: false

@ -0,0 +1,7 @@
global:
  always_scrape_classic_histograms: false
scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:8080']
    always_scrape_classic_histograms: true
|
@ -115,6 +115,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
|
|||
|
||||
region, err := metadata.Region()
|
||||
if err != nil {
|
||||
//nolint:staticcheck // Capitalized first word.
|
||||
return errors.New("Lightsail SD configuration requires a region")
|
||||
}
|
||||
c.Region = region
|
||||
|
|
|
@ -425,14 +425,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
|
|||
d := newDiscovery(t, config)
|
||||
|
||||
// Should be empty if not initialized.
|
||||
require.Equal(t, "", d.clientDatacenter)
|
||||
require.Empty(t, d.clientDatacenter)
|
||||
|
||||
err = d.getDatacenter()
|
||||
|
||||
// An error should be returned.
|
||||
require.EqualError(t, err, tc.errMessage)
|
||||
// Should still be empty.
|
||||
require.Equal(t, "", d.clientDatacenter)
|
||||
require.Empty(t, d.clientDatacenter)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -53,14 +53,16 @@ const (
|
|||
// the Discoverer interface.
|
||||
type hcloudDiscovery struct {
|
||||
*refresh.Discovery
|
||||
client *hcloud.Client
|
||||
port int
|
||||
client *hcloud.Client
|
||||
port int
|
||||
labelSelector string
|
||||
}
|
||||
|
||||
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
||||
func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) {
|
||||
d := &hcloudDiscovery{
|
||||
port: conf.Port,
|
||||
port: conf.Port,
|
||||
labelSelector: conf.LabelSelector,
|
||||
}
|
||||
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
|
||||
|
@ -79,7 +81,10 @@ func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error
|
|||
}
|
||||
|
||||
func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||
servers, err := d.client.Server.All(ctx)
|
||||
servers, err := d.client.Server.AllWithOpts(ctx, hcloud.ServerListOpts{ListOpts: hcloud.ListOpts{
|
||||
PerPage: 50,
|
||||
LabelSelector: d.labelSelector,
|
||||
}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -59,8 +59,11 @@ type SDConfig struct {
|
|||
RefreshInterval model.Duration `yaml:"refresh_interval"`
|
||||
Port int `yaml:"port"`
|
||||
Role Role `yaml:"role"`
|
||||
hcloudEndpoint string // For tests only.
|
||||
robotEndpoint string // For tests only.
|
||||
|
||||
LabelSelector string `yaml:"label_selector,omitempty"`
|
||||
|
||||
hcloudEndpoint string // For tests only.
|
||||
robotEndpoint string // For tests only.
|
||||
}
|
||||
|
||||
// NewDiscovererMetrics implements discovery.Config.
|
||||
|
|
|
@ -20,6 +20,7 @@ import (
|
|||
"log/slog"
|
||||
"os"
|
||||
"reflect"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -38,8 +39,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
// Required to get the GCP auth provider working.
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Required to get the GCP auth provider working.
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
@ -210,18 +210,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
if _, ok := allowedSelectors[c.Role]; !ok {
|
||||
return fmt.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role)
|
||||
}
|
||||
var allowed bool
|
||||
for _, role := range allowedSelectors[c.Role] {
|
||||
if role == string(selector.Role) {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allowed {
|
||||
if !slices.Contains(allowedSelectors[c.Role], string(selector.Role)) {
|
||||
return fmt.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", "))
|
||||
}
|
||||
|
||||
_, err := fields.ParseSelector(selector.Field)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -219,7 +219,7 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
|
|||
podPhaseLabel: lv(string(pod.Status.Phase)),
|
||||
podNodeNameLabel: lv(pod.Spec.NodeName),
|
||||
podHostIPLabel: lv(pod.Status.HostIP),
|
||||
podUID: lv(string(pod.ObjectMeta.UID)),
|
||||
podUID: lv(string(pod.UID)),
|
||||
}
|
||||
|
||||
addObjectMetaLabels(ls, pod.ObjectMeta, RolePod)
|
||||
|
|
|
@ -194,7 +194,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
|||
events, err := d.client.ListEvents(ctx, &eventsOpts)
|
||||
if err != nil {
|
||||
var e *linodego.Error
|
||||
if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) {
|
||||
if !errors.As(err, &e) || e.Code != http.StatusUnauthorized {
|
||||
return nil, err
|
||||
}
|
||||
// If we get a 401, the token doesn't have `events:read_only` scope.
|
||||
|
|
|
@ -695,7 +695,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
|||
for x := 0; x < totalUpdatesCount; x++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
require.FailNow(t, "%d: no update arrived within the timeout limit", x)
|
||||
t.Fatalf("%d: no update arrived within the timeout limit", x)
|
||||
case tgs := <-provUpdates:
|
||||
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
|
||||
for _, got := range discoveryManager.allGroups() {
|
||||
|
@ -769,12 +769,10 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
|
|||
}
|
||||
}
|
||||
}
|
||||
if match != present {
|
||||
msg := ""
|
||||
if !present {
|
||||
msg = "not"
|
||||
}
|
||||
require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
|
||||
if present {
|
||||
require.Truef(t, match, "%q must be present in Targets labels: %q", label, mergedTargets)
|
||||
} else {
|
||||
require.Falsef(t, match, "%q must be absent in Targets labels: %q", label, mergedTargets)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1091,9 +1089,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
|
|||
targetGroups, ok := discoveryManager.targets[p]
|
||||
require.True(t, ok, "'%v' should be present in targets", p)
|
||||
// Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
|
||||
require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p)
|
||||
require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
|
||||
require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
|
||||
require.Empty(t, syncedTargets["prometheus"], 0)
|
||||
require.Empty(t, syncedTargets["prometheus"])
|
||||
}
|
||||
|
||||
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
|
||||
|
@ -1373,10 +1371,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
|
|||
time.Sleep(expected.delay)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
require.FailNow(t, "step %d: no update received in the expected timeframe", i)
|
||||
t.Fatalf("step %d: no update received in the expected timeframe", i)
|
||||
case tgs, ok := <-mgr.SyncCh():
|
||||
require.True(t, ok, "step %d: discovery manager channel is closed", i)
|
||||
require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i)
|
||||
require.Len(t, tgs, len(expected.tgs), "step %d: targets mismatch", i)
|
||||
|
||||
for k := range expected.tgs {
|
||||
_, ok := tgs[k]
|
||||
|
|
|
@ -202,7 +202,7 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
|
|||
|
||||
tgt = tg.Targets[1]
|
||||
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
|
||||
"Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
|
||||
}
|
||||
|
||||
|
@ -300,9 +300,9 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
|
|||
|
||||
tgt := tg.Targets[0]
|
||||
require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
|
||||
"Wrong portMappings label from the first port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
|
||||
"Wrong portDefinitions label from the first port.")
|
||||
|
||||
tgt = tg.Targets[1]
|
||||
|
@ -354,12 +354,12 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
|
|||
|
||||
tgt := tg.Targets[0]
|
||||
require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
|
||||
tgt = tg.Targets[1]
|
||||
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
}
|
||||
|
||||
|
@ -401,13 +401,13 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) {
|
|||
|
||||
tgt := tg.Targets[0]
|
||||
require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
|
||||
tgt = tg.Targets[1]
|
||||
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
}
|
||||
|
||||
func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList {
|
||||
|
@ -458,12 +458,12 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
|
|||
tgt := tg.Targets[0]
|
||||
require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
|
||||
tgt = tg.Targets[1]
|
||||
require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
}
|
||||
|
||||
func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList {
|
||||
|
@ -514,12 +514,12 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
|
|||
tgt := tg.Targets[0]
|
||||
require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
|
||||
tgt = tg.Targets[1]
|
||||
require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
}
|
||||
|
||||
func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList {
|
||||
|
@ -574,10 +574,10 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
|
|||
tgt := tg.Targets[0]
|
||||
require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
|
||||
|
||||
tgt = tg.Targets[1]
|
||||
require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
|
||||
require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
|
||||
}
|
||||
|
|
|
@ -235,10 +235,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
|
|||
containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
|
||||
if len(networks) == 0 {
|
||||
// Try to lookup shared networks
|
||||
for {
|
||||
if !containerNetworkMode.IsContainer() {
|
||||
break
|
||||
}
|
||||
for containerNetworkMode.IsContainer() {
|
||||
tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
|
||||
if !exists {
|
||||
break
|
||||
|
|
|
@ -182,9 +182,10 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
var ipv6Addresses []string
|
||||
|
||||
for _, ip := range server.PublicIPs {
|
||||
if ip.Family == instance.ServerIPIPFamilyInet {
|
||||
switch ip.Family {
|
||||
case instance.ServerIPIPFamilyInet:
|
||||
ipv4Addresses = append(ipv4Addresses, ip.Address.String())
|
||||
} else if ip.Family == instance.ServerIPIPFamilyInet6 {
|
||||
case instance.ServerIPIPFamilyInet6:
|
||||
ipv6Addresses = append(ipv6Addresses, ip.Address.String())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -141,18 +141,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||
return err
|
||||
}
|
||||
if c.Server == "" {
|
||||
//nolint:staticcheck // Capitalized first word.
|
||||
return errors.New("Uyuni SD configuration requires server host")
|
||||
}
|
||||
|
||||
_, err = url.Parse(c.Server)
|
||||
if err != nil {
|
||||
//nolint:staticcheck // Capitalized first word.
|
||||
return fmt.Errorf("Uyuni Server URL is not valid: %w", err)
|
||||
}
|
||||
|
||||
if c.Username == "" {
|
||||
//nolint:staticcheck // Capitalized first word.
|
||||
return errors.New("Uyuni SD configuration requires a username")
|
||||
}
|
||||
if c.Password == "" {
|
||||
//nolint:staticcheck // Capitalized first word.
|
||||
return errors.New("Uyuni SD configuration requires a password")
|
||||
}
|
||||
return c.HTTPClientConfig.Validate()
|
||||
|
|
|
@ -581,8 +581,8 @@ Dump samples from a TSDB.
|
|||
| Flag | Description | Default |
|
||||
| --- | --- | --- |
|
||||
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | |
|
||||
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
|
||||
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
|
||||
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` |
|
||||
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` |
|
||||
| <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
|
||||
|
||||
|
||||
|
@ -608,8 +608,8 @@ Dump samples from a TSDB.
|
|||
| Flag | Description | Default |
|
||||
| --- | --- | --- |
|
||||
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | |
|
||||
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
|
||||
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
|
||||
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` |
|
||||
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` |
|
||||
| <code class="text-nowrap">--match</code> <code class="text-nowrap">...<code class="text-nowrap"> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
|
||||
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ to an external service. Whenever the alert expression results in one or more
|
|||
vector elements at a given point in time, the alert counts as active for these
|
||||
elements' label sets.
|
||||
|
||||
### Defining alerting rules
|
||||
## Defining alerting rules
|
||||
|
||||
Alerting rules are configured in Prometheus in the same way as [recording
|
||||
rules](recording_rules.md).
|
||||
|
@ -54,7 +54,7 @@ values can be templated.
|
|||
|
||||
The `annotations` clause specifies a set of informational labels that can be used to store longer additional information such as alert descriptions or runbook links. The annotation values can be templated.
|
||||
|
||||
#### Templating
|
||||
### Templating
|
||||
|
||||
Label and annotation values can be templated using [console
|
||||
templates](https://prometheus.io/docs/visualization/consoles). The `$labels`
|
||||
|
@ -93,7 +93,7 @@ groups:
|
|||
description: "{{ $labels.instance }} has a median request latency above 1s (current value: {{ $value }}s)"
|
||||
```
|
||||
|
||||
### Inspecting alerts during runtime
|
||||
## Inspecting alerts during runtime
|
||||
|
||||
To manually inspect which alerts are active (pending or firing), navigate to
|
||||
the "Alerts" tab of your Prometheus instance. This will show you the exact
|
||||
|
@ -105,7 +105,7 @@ The sample value is set to `1` as long as the alert is in the indicated active
|
|||
(pending or firing) state, and the series is marked stale when this is no
|
||||
longer the case.
|
||||
|
||||
### Sending alert notifications
|
||||
## Sending alert notifications
|
||||
|
||||
Prometheus's alerting rules are good at figuring out what is broken *right now*, but
|
||||
they are not a fully-fledged notification solution. Another layer is needed to
|
||||
|
@ -114,6 +114,6 @@ on top of the simple alert definitions. In Prometheus's ecosystem, the
|
|||
[Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) takes on this
|
||||
role. Thus, Prometheus may be configured to periodically send information about
|
||||
alert states to an Alertmanager instance, which then takes care of dispatching
|
||||
the right notifications.
|
||||
the right notifications.
|
||||
Prometheus can be [configured](configuration.md) to automatically discover available
|
||||
Alertmanager instances through its service discovery integrations.
|
||||
|
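To illustrate the discovery-based setup mentioned above, here is a minimal sketch of an `alerting` section; the DNS name and port are placeholders, not values from this change:

```yaml
alerting:
  alertmanagers:
    # Discover Alertmanager instances via DNS A records instead of
    # listing them statically.
    - dns_sd_configs:
        - names: ['alertmanager.example.internal']
          type: A
          port: 9093
```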
|
|
@ -80,9 +80,9 @@ global:
|
|||
[ rule_query_offset: <duration> | default = 0s ]
|
||||
|
||||
# The labels to add to any time series or alerts when communicating with
|
||||
# external systems (federation, remote storage, Alertmanager).
|
||||
# Environment variable references `${var}` or `$var` are replaced according
|
||||
# to the values of the current environment variables.
|
||||
# external systems (federation, remote storage, Alertmanager).
|
||||
# Environment variable references `${var}` or `$var` are replaced according
|
||||
# to the values of the current environment variables.
|
||||
# References to undefined variables are replaced by the empty string.
|
||||
# The `$` character can be escaped by using `$$`.
|
||||
external_labels:
|
||||
|
@ -138,11 +138,16 @@ global:
|
|||
# Specifies the validation scheme for metric and label names. Either blank or
|
||||
# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons,
|
||||
# and underscores.
|
||||
[ metric_name_validation_scheme <string> | default "utf8" ]
|
||||
[ metric_name_validation_scheme: <string> | default "utf8" ]
|
||||
|
||||
# Specifies whether to convert all scraped classic histograms into native
|
||||
# histograms with custom buckets.
|
||||
[ convert_classic_histograms_to_nhcb <bool> | default = false]
|
||||
[ convert_classic_histograms_to_nhcb: <bool> | default = false]
|
||||
|
||||
# Specifies whether to scrape a classic histogram, even if it is also exposed as a native
|
||||
# histogram (has no effect without --enable-feature=native-histograms).
|
||||
[ always_scrape_classic_histograms: <boolean> | default = false ]
|
||||
|
||||
|
||||
runtime:
|
||||
# Configure the Go garbage collector GOGC parameter
|
||||
|
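As an illustration of how the two new global options above interact with per-job settings, here is a minimal sketch; the job name and target are placeholders:

```yaml
global:
  # Defaults that apply to every scrape job unless overridden below.
  convert_classic_histograms_to_nhcb: true
  always_scrape_classic_histograms: false

scrape_configs:
  - job_name: app
    static_configs:
      - targets: ['app.example.com:8080']
    # A per-job value takes precedence over the global default.
    convert_classic_histograms_to_nhcb: false
```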
@ -190,7 +195,7 @@ otlp:
|
|||
# It preserves all special character like dots and won't append special suffixes for metric
|
||||
# unit and type.
|
||||
#
|
||||
# WARNING: The "NoTranslation" setting has significant known risks and limitations (see https://prometheus.io/docs/practices/naming/
|
||||
# WARNING: The "NoTranslation" setting has significant known risks and limitations (see https://prometheus.io/docs/practices/naming/
|
||||
# for details):
|
||||
# * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
|
||||
# * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
|
||||
|
@ -254,7 +259,8 @@ job_name: <job_name>
|
|||
|
||||
# Whether to scrape a classic histogram, even if it is also exposed as a native
|
||||
# histogram (has no effect without --enable-feature=native-histograms).
|
||||
[ always_scrape_classic_histograms: <boolean> | default = false ]
|
||||
[ always_scrape_classic_histograms: <boolean> |
|
||||
default = <global.always_scrape_classic_histograms> ]
|
||||
|
||||
# The HTTP resource path on which to fetch metrics from targets.
|
||||
[ metrics_path: <path> | default = /metrics ]
|
||||
|
@ -478,26 +484,26 @@ metric_relabel_configs:
|
|||
# that will be kept in memory. 0 means no limit.
|
||||
[ keep_dropped_targets: <int> | default = 0 ]
|
||||
|
||||
# Specifies the validation scheme for metric and label names. Either blank or
|
||||
# Specifies the validation scheme for metric and label names. Either blank or
|
||||
# "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and
|
||||
# underscores.
|
||||
[ metric_name_validation_scheme <string> | default "utf8" ]
|
||||
[ metric_name_validation_scheme: <string> | default "utf8" ]
|
||||
|
||||
# Specifies the character escaping scheme that will be requested when scraping
|
||||
# for metric and label names that do not conform to the legacy Prometheus
|
||||
# character set. Available options are:
|
||||
# character set. Available options are:
|
||||
# * `allow-utf-8`: Full UTF-8 support, no escaping needed.
|
||||
# * `underscores`: Escape all legacy-invalid characters to underscores.
|
||||
# * `dots`: Escapes dots to `_dot_`, underscores to `__`, and all other
|
||||
# legacy-invalid characters to underscores.
|
||||
# * `values`: Prepend the name with `U__` and replace all invalid
|
||||
# characters with their unicode value, surrounded by underscores. Single
|
||||
# underscores are replaced with double underscores.
|
||||
# underscores are replaced with double underscores.
|
||||
# e.g. "U__my_2e_dotted_2e_name".
|
||||
# If this value is left blank, Prometheus will default to `allow-utf-8` if the
|
||||
# validation scheme for the current scrape config is set to utf8, or
|
||||
# `underscores` if the validation scheme is set to `legacy`.
|
||||
[ metric_name_validation_scheme <string> | default "utf8" ]
|
||||
[ metric_name_validation_scheme: <string> | default "utf8" ]
|
||||
|
||||
# Limit on total number of positive and negative buckets allowed in a single
|
||||
# native histogram. The resolution of a histogram with more buckets will be
|
||||
|
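For a job that still exposes legacy-only metric names, the two settings described above might be combined like this (a sketch; the job name and target are placeholders):

```yaml
scrape_configs:
  - job_name: legacy-exporter
    # Restrict names to the legacy character set and escape anything else.
    metric_name_validation_scheme: legacy
    metric_name_escaping_scheme: underscores
    static_configs:
      - targets: ['legacy.example.com:9100']
```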
@ -511,7 +517,7 @@ metric_relabel_configs:
|
|||
# reduced as much as possible until it is within the limit.
|
||||
# To set an upper limit for the schema (equivalent to "scale" in OTel's
|
||||
# exponential histograms), use the following factor limits:
|
||||
#
|
||||
#
|
||||
# +----------------------------+----------------------------+
|
||||
# | growth factor | resulting schema AKA scale |
|
||||
# +----------------------------+----------------------------+
|
||||
|
@ -541,14 +547,14 @@ metric_relabel_configs:
|
|||
# +----------------------------+----------------------------+
|
||||
# | 1.002 | 8 |
|
||||
# +----------------------------+----------------------------+
|
||||
#
|
||||
#
|
||||
# 0 results in the smallest supported factor (which is currently ~1.0027 or
|
||||
# schema 8, but might change in the future).
|
||||
[ native_histogram_min_bucket_factor: <float> | default = 0 ]
|
||||
|
||||
# Specifies whether to convert classic histograms into native histograms with
|
||||
# custom buckets (has no effect without --enable-feature=native-histograms).
|
||||
[ convert_classic_histograms_to_nhcb <bool> | default =
|
||||
[ convert_classic_histograms_to_nhcb: <bool> | default =
|
||||
<global.convert_classic_histograms_to_nhcb> ]
|
||||
```
|
||||
|
||||
|
@ -558,7 +564,7 @@ Where `<job_name>` must be unique across all scrape configurations.
|
|||
|
||||
A `http_config` allows configuring HTTP requests.
|
||||
|
||||
```
|
||||
```yaml
|
||||
# Sets the `Authorization` header on every request with the
|
||||
# configured username and password.
|
||||
# username and username_file are mutually exclusive.
|
||||
|
@ -789,7 +795,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
|
|||
* `__meta_consul_address`: the address of the target
|
||||
* `__meta_consul_dc`: the datacenter name for the target
|
||||
* `__meta_consul_health`: the health status of the service
|
||||
* `__meta_consul_partition`: the admin partition name where the service is registered
|
||||
* `__meta_consul_partition`: the admin partition name where the service is registered
|
||||
* `__meta_consul_metadata_<key>`: each node metadata key value of the target
|
||||
* `__meta_consul_node`: the node name defined for the target
|
||||
* `__meta_consul_service_address`: the service address of the target
|
||||
|
@ -936,7 +942,7 @@ host: <string>
|
|||
[ host_networking_host: <string> | default = "localhost" ]
|
||||
|
||||
# Sort all non-nil networks in ascending order based on network name and
|
||||
# get the first network if the container has multiple networks defined,
|
||||
# get the first network if the container has multiple networks defined,
|
||||
# thus avoiding collecting duplicate targets.
|
||||
[ match_first_network: <boolean> | default = true ]
|
||||
|
||||
|
@ -1252,7 +1258,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
|
|||
|
||||
#### `loadbalancer`
|
||||
|
||||
The `loadbalancer` role discovers one target per Octavia loadbalancer with a
|
||||
The `loadbalancer` role discovers one target per Octavia loadbalancer with a
|
||||
`PROMETHEUS` listener. The target address defaults to the VIP address
|
||||
of the load balancer.
|
||||
|
||||
|
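A minimal sketch of a scrape job using this role; the endpoint, region, and credentials are placeholders, not values from this change:

```yaml
scrape_configs:
  - job_name: openstack-loadbalancers
    openstack_sd_configs:
      - role: loadbalancer
        region: RegionOne
        identity_endpoint: https://keystone.example.com:5000/v3
        username: prometheus
        password: secret
        domain_name: Default
        project_name: observability
```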
@ -1465,7 +1471,7 @@ and serves as an interface to plug in custom service discovery mechanisms.
|
|||
|
||||
It reads a set of files containing a list of zero or more
|
||||
`<static_config>`s. Changes to all defined files are detected via disk watches
|
||||
and applied immediately.
|
||||
and applied immediately.
|
||||
|
||||
While those individual files are watched for changes,
|
||||
the parent directory is also watched implicitly. This is to handle [atomic
|
||||
|
@ -1638,6 +1644,10 @@ role: <string>
|
|||
# The time after which the servers are refreshed.
|
||||
[ refresh_interval: <duration> | default = 60s ]
|
||||
|
||||
# Label selector used to filter the servers when fetching them from the API. See https://docs.hetzner.cloud/#label-selector for more details.
|
||||
# Only used when role is hcloud.
|
||||
[ label_selector: <string> ]
|
||||
|
||||
# HTTP client settings, including authentication methods (such as basic auth and
|
||||
# authorization), proxy configurations, TLS options, custom HTTP headers, etc.
|
||||
[ <http_config> ]
|
||||
|
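A short sketch of how the new `label_selector` option might be used to scope discovery to a subset of Hetzner Cloud servers; the selector value and credentials file are placeholders:

```yaml
scrape_configs:
  - job_name: hetzner-hcloud
    hetzner_sd_configs:
      - role: hcloud
        # Only servers carrying this label are returned by the hcloud API.
        label_selector: "env=prod"
        authorization:
          credentials_file: /etc/prometheus/hetzner_api_token
```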
@ -1974,7 +1984,7 @@ See below for the configuration options for Kuma MonitoringAssignment discovery:
|
|||
# Address of the Kuma Control Plane's MADS xDS server.
|
||||
server: <string>
|
||||
|
||||
# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend.
|
||||
# Client id is used by Kuma Control Plane to compute Monitoring Assignment for specific Prometheus backend.
|
||||
# This is useful when migrating between multiple Prometheus backends, or having separate backend for each Mesh.
|
||||
# When not specified, system hostname/fqdn will be used if available, if not `prometheus` will be used.
|
||||
[ client_id: <string> ]
|
||||
|
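A minimal sketch of a Kuma MonitoringAssignment scrape job using an explicit `client_id`; the server address and identifier are placeholders:

```yaml
scrape_configs:
  - job_name: kuma-dataplanes
    kuma_sd_configs:
      - server: http://kuma-control-plane.kuma-system.svc:5676
        # Stable identifier for this Prometheus backend, useful when
        # migrating between backends or running one backend per mesh.
        client_id: prometheus-main
```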
@ -2072,7 +2082,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
|
|||
* `__meta_linode_status`: the status of the linode instance
|
||||
* `__meta_linode_tags`: a list of tags of the linode instance joined by the tag separator
|
||||
* `__meta_linode_group`: the display group a linode instance is a member of
|
||||
* `__meta_linode_gpus`: the number of GPU's of the linode instance
|
||||
* `__meta_linode_gpus`: the number of GPU's of the linode instance
|
||||
* `__meta_linode_hypervisor`: the virtualization software powering the linode instance
|
||||
* `__meta_linode_backups`: the backup service status of the linode instance
|
||||
* `__meta_linode_specs_disk_bytes`: the amount of storage space the linode instance has access to
|
||||
|
@ -2593,7 +2603,7 @@ input to a subsequent relabeling step), use the `__tmp` label name prefix. This
|
|||
prefix is guaranteed to never be used by Prometheus itself.
|
||||
|
||||
```yaml
|
||||
# The source_labels tells the rule what labels to fetch from the series. Any
|
||||
# The source_labels tells the rule what labels to fetch from the series. Any
|
||||
# labels which do not exist get a blank value (""). Their content is concatenated
|
||||
# using the configured separator and matched against the configured regular expression
|
||||
# for the replace, keep, and drop actions.
|
||||
|
@ -2884,7 +2894,7 @@ write_relabel_configs:
|
|||
# For the `io.prometheus.write.v2.Request` message, this option is noop (always true).
|
||||
[ send_native_histograms: <boolean> | default = false ]
|
||||
|
||||
# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it.
|
||||
# When enabled, remote-write will resolve the URL host name via DNS, choose one of the IP addresses at random, and connect to it.
|
||||
# When disabled, remote-write relies on Go's standard behavior, which is to try to connect to each address in turn.
|
||||
# The connection timeout applies to the whole operation, i.e. in the latter case it is spread over all attempt.
|
||||
# This is an experimental feature, and its behavior might still change, or even get removed.
|
||||
|
@ -2917,7 +2927,7 @@ azuread:
|
|||
|
||||
# Azure User-assigned Managed identity.
|
||||
[ managed_identity:
|
||||
[ client_id: <string> ] ]
|
||||
[ client_id: <string> ] ]
|
||||
|
||||
# Azure OAuth.
|
||||
[ oauth:
|
||||
|
@ -3045,8 +3055,8 @@ with this feature.
|
|||
# that is within the out-of-order window, or (b) too-old, i.e. not in-order
|
||||
# and before the out-of-order window.
|
||||
#
|
||||
# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows
|
||||
# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
|
||||
# When out_of_order_time_window is greater than 0, it also affects experimental agent. It allows
|
||||
# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
|
||||
# to the timestamp of the last appended sample for the same series.
|
||||
[ out_of_order_time_window: <duration> | default = 0s ]
|
||||
```
|
||||
|
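A minimal sketch of where this setting lives in `prometheus.yml`; the 30m window is an arbitrary example value:

```yaml
storage:
  tsdb:
    # Accept samples that are up to 30 minutes older than the newest
    # sample already appended for the same series.
    out_of_order_time_window: 30m
```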
|
|
@ -27,7 +27,7 @@ Generic placeholders are defined as follows:
|
|||
|
||||
A valid example file can be found [here](/documentation/examples/web-config.yml).
|
||||
|
||||
```
|
||||
```yaml
|
||||
tls_server_config:
|
||||
# Certificate and key files for server to use to authenticate to client.
|
||||
cert_file: <filename>
|
||||
|
|
|
@ -34,7 +34,7 @@ When the file is syntactically valid, the checker prints a textual
|
|||
representation of the parsed rules to standard output and then exits with
|
||||
a `0` return status.
|
||||
|
||||
If there are any syntax errors or invalid input arguments, it prints an error
|
||||
If there are any syntax errors or invalid input arguments, it prints an error
|
||||
message to standard error and exits with a `1` return status.
|
||||
|
||||
## Recording rules
|
||||
|
@ -71,7 +71,8 @@ groups:
|
|||
```
|
||||
|
||||
### `<rule_group>`
|
||||
```
|
||||
|
||||
```yaml
|
||||
# The name of the group. Must be unique within a file.
|
||||
name: <string>
|
||||
|
||||
|
@ -98,7 +99,7 @@ rules:
|
|||
|
||||
The syntax for recording rules is:
|
||||
|
||||
```
|
||||
```yaml
|
||||
# The name of the time series to output to. Must be a valid metric name.
|
||||
record: <string>
|
||||
|
||||
|
@ -114,7 +115,7 @@ labels:
|
|||
|
||||
The syntax for alerting rules is:
|
||||
|
||||
```
|
||||
```yaml
|
||||
# The name of the alert. Must be a valid label value.
|
||||
alert: <string>
|
||||
|
||||
|
@ -143,7 +144,7 @@ annotations:
|
|||
See also the
|
||||
[best practices for naming metrics created by recording rules](https://prometheus.io/docs/practices/rules/#recording-rules).
|
||||
|
||||
# Limiting alerts and series
|
||||
## Limiting alerts and series
|
||||
|
||||
A limit for alerts produced by alerting rules and series produced recording rules
|
||||
can be configured per-group. When the limit is exceeded, _all_ series produced
|
||||
|
@ -152,9 +153,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be
|
|||
recorded as an error in the evaluation, and as such no stale markers are
|
||||
written.
|
||||
|
||||
# Rule query offset
|
||||
## Rule query offset
|
||||
This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals.
|
||||
|
||||
# Failed rule evaluations due to slow evaluation
|
||||
## Failed rule evaluations due to slow evaluation
|
||||
|
||||
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
|
||||
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
|
||||
|
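A sketch of a rule group using the per-group settings discussed above; the group name, offset, limit, and rule are illustrative, not taken from this change:

```yaml
groups:
  - name: example-recording-rules
    interval: 1m
    # Evaluate against data that is at least one minute old.
    query_offset: 1m
    # Discard the group's output if it produces more than 500 series or alerts.
    limit: 500
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
```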
|
|
@ -13,7 +13,7 @@ templating](https://golang.org/pkg/text/template/) system.
|
|||
|
||||
## Simple alert field templates
|
||||
|
||||
```
|
||||
```yaml
|
||||
alert: InstanceDown
|
||||
expr: up == 0
|
||||
for: 5m
|
||||
|
@ -33,7 +33,7 @@ console instead.
|
|||
|
||||
This displays a list of instances, and whether they are up:
|
||||
|
||||
```go
|
||||
```
|
||||
{{ range query "up" }}
|
||||
{{ .Labels.instance }} {{ .Value }}
|
||||
{{ end }}
|
||||
|
@ -43,7 +43,7 @@ The special `.` variable contains the value of the current sample for each loop
|
|||
|
||||
## Display one value
|
||||
|
||||
```go
|
||||
```
|
||||
{{ with query "some_metric{instance='someinstance'}" }}
|
||||
{{ . | first | value | humanize }}
|
||||
{{ end }}
|
||||
|
@ -58,7 +58,7 @@ formatting of results, and linking to the [expression browser](https://prometheu
|
|||
|
||||
## Using console URL parameters
|
||||
|
||||
```go
|
||||
```
|
||||
{{ with printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance | query }}
|
||||
{{ . | first | value | humanize1024 }}B
|
||||
{{ end }}
|
||||
|
@ -95,7 +95,7 @@ powerful when combined with
|
|||
[console library](template_reference.md#console-templates) support, allowing
|
||||
sharing of templates across consoles.
|
||||
|
||||
```go
|
||||
```
|
||||
{{/* Define the template */}}
|
||||
{{define "myTemplate"}}
|
||||
do something
|
||||
|
@ -107,7 +107,7 @@ sharing of templates across consoles.
|
|||
|
||||
Templates are limited to one argument. The `args` function can be used to wrap multiple arguments.
|
||||
|
||||
```go
|
||||
```
|
||||
{{define "myMultiArgTemplate"}}
|
||||
First argument: {{.arg0}}
|
||||
Second argument: {{.arg1}}
|
||||
|
|
|
@ -17,8 +17,8 @@ The primary data structure for dealing with time series data is the sample, defi
|
|||
|
||||
```go
|
||||
type sample struct {
|
||||
Labels map[string]string
|
||||
Value interface{}
|
||||
Labels map[string]string
|
||||
Value interface{}
|
||||
}
|
||||
```
|
||||
|
||||
|
|
|
@ -24,6 +24,10 @@ rule_files:
|
|||
|
||||
[ evaluation_interval: <duration> | default = 1m ]
|
||||
|
||||
# Setting fuzzy_compare true will very slightly weaken floating point comparisons.
|
||||
# This will (effectively) ignore differences in the last bit of the mantissa.
|
||||
[ fuzzy_compare: <boolean> | default = false ]
|
||||
|
||||
# The order in which group names are listed below will be the order of evaluation of
|
||||
# rule groups (at a given evaluation time). The order is guaranteed only for the groups mentioned below.
|
||||
# All the groups need not be mentioned below.
|
||||
|
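A minimal sketch of a unit-test file using the new `fuzzy_compare` option; the rule file name, series, and expected value are placeholders:

```yaml
rule_files:
  - rules.yml

evaluation_interval: 1m

# Tolerate a difference in the last mantissa bit when comparing
# expected and actual sample values.
fuzzy_compare: true

tests:
  - interval: 1m
    input_series:
      - series: 'request_duration_seconds_sum{job="api"}'
        values: '0+0.3x10'
    promql_expr_test:
      - expr: rate(request_duration_seconds_sum{job="api"}[5m])
        eval_time: 10m
        exp_samples:
          - labels: '{job="api"}'
            value: 0.005
```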
@ -95,20 +99,20 @@ series: <string>
|
|||
# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
|
||||
# Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
|
||||
# All properties are optional and default to 0. The order is not important. The following properties are supported:
|
||||
# - schema (int):
|
||||
# - schema (int):
|
||||
# Currently valid schema numbers are -4 <= n <= 8. They are all for
|
||||
# base-2 bucket schemas, where 1 is a bucket boundary in each case, and
|
||||
# then each power of two is divided into 2^n logarithmic buckets. Or
|
||||
# in other words, each bucket boundary is the previous boundary times
|
||||
# 2^(2^-n).
|
||||
# - sum (float):
|
||||
# - sum (float):
|
||||
# The sum of all observations, including the zero bucket.
|
||||
# - count (non-negative float):
|
||||
# - count (non-negative float):
|
||||
# The number of observations, including those that are NaN and including the zero bucket.
|
||||
# - z_bucket (non-negative float):
|
||||
# - z_bucket (non-negative float):
|
||||
# The sum of all observations in the zero bucket.
|
||||
# - z_bucket_w (non-negative float):
|
||||
# The width of the zero bucket.
|
||||
# - z_bucket_w (non-negative float):
|
||||
# The width of the zero bucket.
|
||||
# If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w.
|
||||
# Otherwise, the zero bucket only contains observations that are exactly 0.
|
||||
# - buckets (list of non-negative floats):
|
||||
|
|
|
@ -23,7 +23,7 @@ Exemplar storage is implemented as a fixed size circular buffer that stores exem
|
|||
|
||||
`--enable-feature=memory-snapshot-on-shutdown`
|
||||
|
||||
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
|
||||
This takes a snapshot of the chunks that are in memory along with the series information when shutting down and stores it on disk. This will reduce the startup time since the memory state can now be restored with this snapshot
|
||||
and m-mapped chunks, while a WAL replay from disk is only needed for the parts of the WAL that are not part of the snapshot.
|
||||
|
||||
## Extra scrape metrics
|
||||
|
@ -183,7 +183,7 @@ This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
|
|||
Enabling this _can_ have negative impact on performance, because the in-memory
|
||||
state is mutex guarded. Cumulative-only OTLP requests are not affected.
|
||||
|
||||
### PromQL arithmetic expressions in time durations
|
||||
## PromQL arithmetic expressions in time durations
|
||||
|
||||
`--enable-feature=promql-duration-expr`
|
||||
|
||||
|
@ -203,7 +203,7 @@ The following operators are supported:
|
|||
|
||||
* `+` - addition
|
||||
* `-` - subtraction
|
||||
* `*` - multiplication
|
||||
* `*` - multiplication
|
||||
* `/` - division
|
||||
* `%` - modulo
|
||||
* `^` - exponentiation
|
||||
|
@ -227,7 +227,7 @@ When enabled, allows for the native ingestion of delta OTLP metrics, storing the
|
|||
|
||||
Currently, the StartTimeUnixNano field is ignored, and deltas are given the unknown metric metadata type.
|
||||
|
||||
Delta support is in a very early stage of development and the ingestion and querying process may change over time. For the open proposal see [prometheus/proposals#48](https://github.com/prometheus/proposals/pull/48).
|
||||
Delta support is in a very early stage of development and the ingestion and querying process may change over time. For the open proposal see [prometheus/proposals#48](https://github.com/prometheus/proposals/pull/48).
|
||||
|
||||
### Querying
|
||||
|
||||
|
@ -246,4 +246,4 @@ These may not work well if the `<range>` is not a multiple of the collection int
|
|||
|
||||
* It is difficult to figure out whether a metric has delta or cumulative temporality, since there's no indication of temporality in metric names or labels. For now, if you are ingesting a mix of delta and cumulative metrics we advise you to explicitly add your own labels to distinguish them. In the future, we plan to introduce type labels to consistently distinguish metric types and potentially make PromQL functions type-aware (e.g. providing warnings when cumulative-only functions are used with delta metrics).
|
||||
|
||||
* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.
|
||||
* If there are multiple samples being ingested at the same timestamp, only one of the points is kept - the samples are **not** summed together (this is how Prometheus works in general - duplicate timestamp samples are rejected). Any aggregation will have to be done before sending samples to Prometheus.
|
||||
|
|
|
@ -200,7 +200,7 @@ To record the time series resulting from this expression into a new metric
|
|||
called `job_instance_mode:node_cpu_seconds:avg_rate5m`, create a file
|
||||
with the following recording rule and save it as `prometheus.rules.yml`:
|
||||
|
||||
```
|
||||
```yaml
|
||||
groups:
|
||||
- name: cpu-node
|
||||
rules:
|
||||
|
|
|
@ -11,52 +11,52 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0
|
|||
|
||||
## Flags
|
||||
|
||||
- The following feature flags have been removed and they have been added to the
|
||||
- The following feature flags have been removed and they have been added to the
|
||||
default behavior of Prometheus v3:
|
||||
- `promql-at-modifier`
|
||||
- `promql-negative-offset`
|
||||
- `new-service-discovery-manager`
|
||||
- `expand-external-labels`
|
||||
- Environment variable references `${var}` or `$var` in external label values
|
||||
are replaced according to the values of the current environment variables.
|
||||
- Environment variable references `${var}` or `$var` in external label values
|
||||
are replaced according to the values of the current environment variables.
|
||||
- References to undefined variables are replaced by the empty string.
|
||||
The `$` character can be escaped by using `$$`.
|
||||
- `no-default-scrape-port`
|
||||
- Prometheus v3 will no longer add ports to scrape targets according to the
|
||||
- Prometheus v3 will no longer add ports to scrape targets according to the
|
||||
specified scheme. Target will now appear in labels as configured.
|
||||
- If you rely on scrape targets like
|
||||
`https://example.com/metrics` or `http://example.com/metrics` to be
|
||||
represented as `https://example.com/metrics:443` and
|
||||
- If you rely on scrape targets like
|
||||
`https://example.com/metrics` or `http://example.com/metrics` to be
|
||||
represented as `https://example.com/metrics:443` and
|
||||
`http://example.com/metrics:80` respectively, add them to your target URLs
|
||||
- `agent`
|
||||
- Instead use the dedicated `--agent` CLI flag.
|
||||
- `remote-write-receiver`
|
||||
- Instead use the dedicated `--web.enable-remote-write-receiver` CLI flag to enable the remote write receiver.
|
||||
- `auto-gomemlimit`
|
||||
- Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
|
||||
container memory limit. If there is no container limit, or the process is
|
||||
running outside of containers, the system memory total is used. To disable
|
||||
- Prometheus v3 will automatically set `GOMEMLIMIT` to match the Linux
|
||||
container memory limit. If there is no container limit, or the process is
|
||||
running outside of containers, the system memory total is used. To disable
|
||||
this, `--no-auto-gomemlimit` is available.
|
||||
- `auto-gomaxprocs`
|
||||
- Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
|
||||
- Prometheus v3 will automatically set `GOMAXPROCS` to match the Linux
|
||||
container CPU quota. To disable this, `--no-auto-gomaxprocs` is available.
|
||||
|
||||
Prometheus v3 will log a warning if you continue to pass these to
|
||||
Prometheus v3 will log a warning if you continue to pass these to
|
||||
`--enable-feature`.
|
||||
|
||||
## Configuration

- The scrape job level configuration option `scrape_classic_histograms` has been
  renamed to `always_scrape_classic_histograms`. If you use the
  `--enable-feature=native-histograms` feature flag to ingest native histograms
  and you also want to ingest classic histograms that an endpoint might expose
  along with native histograms, be sure to add this configuration or change your
  configuration from the old name (see the sketch after this list).
- The `http_config.enable_http2` default in `remote_write` items has been
  changed to `false`. In Prometheus v2 the remote write http client would
  default to use http2. In order to parallelize multiple remote write queues
  across multiple sockets it is preferable to not default to http2.
  If you prefer to use http2 for remote write you must now set
  `http_config.enable_http2: true` in your `remote_write` configuration section.
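As a sketch of both options in one configuration (the job name, target, and remote write URL are placeholders; `enable_http2` is the HTTP client setting referred to as `http_config.enable_http2` above):

```yaml
scrape_configs:
  - job_name: "app"
    # Renamed from scrape_classic_histograms in Prometheus v2.
    always_scrape_classic_histograms: true
    static_configs:
      - targets: ["app.example.com:8080"]

remote_write:
  - url: "https://remote.example.com/api/v1/write"
    # The v3 default is false; set it explicitly to keep using http2.
    enable_http2: true
```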
## PromQL

### TSDB format and downgrade

The TSDB format has been changed slightly in Prometheus v2.55 in preparation for changes
to the index format. Consequently, a Prometheus v3 TSDB can only be read by
Prometheus v2.55 or newer. Keep that in mind when upgrading to v3 -- you will only be
able to downgrade to v2.55, not lower, without losing your TSDB persistent data.

### TSDB storage contract

TSDB compatible storage is now expected to return results matching the specified
selectors. This might impact some third party implementations, most likely those
implementing `remote_read`.

This contract is not explicitly enforced, but can cause undefined behavior.

### Log message format

Prometheus v3 has adopted `log/slog` over the previous `go-kit/log`. This
results in a change of log message format.

### `le` and `quantile` label values

In Prometheus v3, the values of the `le` label of classic histograms and the
`quantile` label of summaries are normalized upon ingestion. In Prometheus v2
the value of these labels depended on the scrape protocol (protobuf vs text
format) in some situations. This led to label values changing based on the
scrape protocol. E.g. a metric exposed as `my_classic_hist{le="1"}` would be
ingested as `my_classic_hist{le="1"}` via the text format, but as
`my_classic_hist{le="1.0"}` via protobuf. This changed the identity of the
metric and caused problems when querying the metric.

In Prometheus v3 these label values will always be normalized to a float-like
representation. I.e. the above example will always result in
`my_classic_hist{le="1.0"}` being ingested into Prometheus, no matter via which
protocol. The effect of this change is that alerts, recording rules and
dashboards that directly reference label values as whole numbers such as
`le="1"` will stop working.

Ways to deal with this change either globally or on a per metric basis:
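One per-metric option, sketched here only as an illustration (the metric name `my_classic_hist` is carried over from the example above), is to rewrite whole-number `le` values to the float-like form at scrape time with `metric_relabel_configs`:

```yaml
scrape_configs:
  - job_name: "app"
    static_configs:
      - targets: ["app.example.com:8080"]
    metric_relabel_configs:
      # Only whole-number values such as "1" match; "0.5" and "+Inf" are left alone.
      - source_labels: [__name__, le]
        regex: 'my_classic_hist_bucket;(\d+)'
        target_label: le
        replacement: '${1}.0'
```

This should **only** be applied to metrics that currently produce such labels.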
### Disallow configuring Alertmanager with the v1 API

Prometheus 3 no longer supports Alertmanager's v1 API. Effectively Prometheus 3
requires [Alertmanager 0.16.0](https://github.com/prometheus/alertmanager/releases/tag/v0.16.0) or later. Users with older Alertmanager
versions or configurations that use `alerting: alertmanagers: [api_version: v1]`
need to upgrade Alertmanager and change their configuration to use `api_version: v2`.
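For reference, a minimal sketch of an updated alerting section (the Alertmanager address is a placeholder):

```yaml
alerting:
  alertmanagers:
    - api_version: v2
      static_configs:
        - targets: ["alertmanager.example.com:9093"]
```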
## Prometheus 2.0 migration guide

For the migration guide from Prometheus 1.8 to 2.0 please refer to the [Prometheus v2.55 documentation](https://prometheus.io/docs/prometheus/2.55/migration/).
The JSON response envelope format is as follows:

```json
{
  "status": "success" | "error",
  "data": <data>,
```

The `data` section of the query result has the following format:

```json
{
  "resultType": "matrix" | "vector" | "scalar" | "string",
  "result": <value>
}
```

The following example evaluates the expression `up` at the time
`2015-07-01T20:10:51.781Z`:

```bash
curl 'http://localhost:9090/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z'
```

```json
{
  "status" : "success",
  "data" : {
```
The `data` section of the query result has the following format:

```json
{
  "resultType": "matrix",
  "result": <value>
}
```

The following example evaluates the expression `up` over a 30-second range with
a query resolution of 15 seconds.

```bash
curl 'http://localhost:9090/api/v1/query_range?query=up&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s'
```

```json
{
  "status" : "success",
  "data" : {
```
The following example formats the expression `foo/bar`:

```bash
curl 'http://localhost:9090/api/v1/format_query?query=foo/bar'
```

```json
{
  "status" : "success",
  "data" : "foo / bar"
}
```

The following example parses the expression `foo/bar`:

```bash
curl 'http://localhost:9090/api/v1/parse_query?query=foo/bar'
```

```json
{
  "data" : {
    "bool" : false,
```

The following example returns all series that match either of the selectors
`up` or `process_start_time_seconds{job="prometheus"}`:

```bash
curl -g 'http://localhost:9090/api/v1/series?' --data-urlencode 'match[]=up' --data-urlencode 'match[]=process_start_time_seconds{job="prometheus"}'
```

```json
{
  "status" : "success",
  "data" : [
```
Here is an example.

```bash
curl 'localhost:9090/api/v1/labels'
```

```json
{
  "status": "success",
  "data": [
```

This example queries for all label values for the `http_status_code` label:

```bash
curl http://localhost:9090/api/v1/label/http_status_code/values
```

```json
{
  "status" : "success",
  "data" : [
```

This example queries for all label values for the `http.status_code` label:

```bash
curl http://localhost:9090/api/v1/label/U__http_2e_status_code/values
```

```json
{
  "status" : "success",
  "data" : [
```
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.

```bash
curl -g 'http://localhost:9090/api/v1/query_exemplars?query=test_exemplar_metric_total&start=2020-09-14T15:22:25.479Z&end=2020-09-14T15:23:25.479Z'
```

```json
{
  "status": "success",
  "data": [
```

Range vectors are returned as result type `matrix`. The corresponding
`result` property has the following format:

```json
[
  {
    "metric": { "<label_name>": "<label_value>", ... },
```

Instant vectors are returned as result type `vector`. The corresponding
`result` property has the following format:

```json
[
  {
    "metric": { "<label_name>": "<label_value>", ... },
```
Scalar results are returned as result type `scalar`. The corresponding
`result` property has the following format:

```json
[ <unix_time>, "<scalar_value>" ]
```

String results are returned as result type `string`. The corresponding
`result` property has the following format:

```json
[ <unix_time>, "<string_value>" ]
```

_Note that native histograms are an experimental feature, and the format below
might still change._

```json
{
  "count": "<count_of_observations>",
  "sum": "<sum_of_observations>",
```
`labels` represents the label set after relabeling has occurred.
`discoveredLabels` represents the unmodified labels retrieved during service discovery before relabeling has occurred.

```bash
curl http://localhost:9090/api/v1/targets
```

```json
{
  "status": "success",
  "data": {
```

Note that an empty array is still returned for targets that are filtered out.
Other values are ignored.

```bash
curl 'http://localhost:9090/api/v1/targets?state=active'
```

```json
{
  "status": "success",
  "data": {
    "activeTargets": [
```

The `scrapePool` query parameter allows the caller to filter by scrape pool name.

```bash
curl 'http://localhost:9090/api/v1/targets?scrapePool=node_exporter'
```

```json
{
  "status": "success",
  "data": {
    "activeTargets": [
```
|
|||
- `group_limit=<number>`: The `group_limit` parameter allows you to specify a limit for the number of rule groups that is returned in a single response. If the total number of rule groups exceeds the specified `group_limit` value, the response will include a `groupNextToken` property. You can use the value of this `groupNextToken` property in subsequent requests in the `group_next_token` parameter to paginate over the remaining rule groups. The `groupNextToken` property will not be present in the final response, indicating that you have retrieved all the available rule groups. Please note that there are no guarantees regarding the consistency of the response if the rule groups are being modified during the pagination process.
|
||||
- `group_next_token`: the pagination token that was returned in previous request when the `group_limit` property is set. The pagination token is used to iteratively paginate over a large number of rule groups. To use the `group_next_token` parameter, the `group_limit` parameter also need to be present. If a rule group that coincides with the next token is removed while you are paginating over the rule groups, a response with status code 400 will be returned.
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/rules
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/rules
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"groups": [
|
||||
|
@ -857,9 +895,11 @@ guarantees as the overarching API v1.
|
|||
GET /api/v1/alerts
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/alerts
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/alerts
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"alerts": [
|
||||
|
```bash
curl -G http://localhost:9091/api/v1/targets/metadata \
    --data-urlencode 'metric=go_goroutines' \
    --data-urlencode 'match_target={job="prometheus"}' \
    --data-urlencode 'limit=2'
```

```json
{
  "status": "success",
  "data": [
```

The following example returns metadata for all metrics for all targets with
label `instance="127.0.0.1:9090"`.

```bash
curl -G http://localhost:9091/api/v1/targets/metadata \
    --data-urlencode 'match_target={instance="127.0.0.1:9090"}'
```

```json
{
  "status": "success",
  "data": [
```
|
|||
|
||||
The following example returns two metrics. Note that the metric `http_requests_total` has more than one object in the list. At least one target has a value for `HELP` that do not match with the rest.
|
||||
|
||||
```json
|
||||
```bash
|
||||
curl -G http://localhost:9090/api/v1/metadata?limit=2
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1014,9 +1062,11 @@ curl -G http://localhost:9090/api/v1/metadata?limit=2
|
|||
|
||||
The following example returns only one metadata entry for each metric.
|
||||
|
||||
```json
|
||||
```bash
|
||||
curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1040,9 +1090,11 @@ curl -G http://localhost:9090/api/v1/metadata?limit_per_metric=1
|
|||
|
||||
The following example returns metadata only for the metric `http_requests_total`.
|
||||
|
||||
```json
|
||||
```bash
|
||||
curl -G http://localhost:9090/api/v1/metadata?metric=http_requests_total
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1073,8 +1125,11 @@ GET /api/v1/alertmanagers
|
|||
|
||||
Both the active and dropped Alertmanagers are part of the response.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/alertmanagers
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/alertmanagers
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1107,8 +1162,11 @@ GET /api/v1/status/config
|
|||
The config is returned as dumped YAML file. Due to limitation of the YAML
|
||||
library, YAML comments are not included.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/status/config
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/config
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1127,8 +1185,11 @@ GET /api/v1/status/flags
|
|||
|
||||
All values are of the result type `string`.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/status/flags
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/flags
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1154,8 +1215,11 @@ GET /api/v1/status/runtimeinfo
|
|||
|
||||
The returned values are of different types, depending on the nature of the runtime property.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/status/runtimeinfo
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/runtimeinfo
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1190,8 +1254,11 @@ GET /api/v1/status/buildinfo
|
|||
|
||||
All values are of the result type `string`.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/status/buildinfo
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/buildinfo
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1232,8 +1299,11 @@ The `data` section of the query result consists of:
|
|||
- **memoryInBytesByLabelName** This will provide a list of the label names and memory used in bytes. Memory usage is calculated by adding the length of all values for a given label name.
|
||||
- **seriesCountByLabelPair** This will provide a list of label value pairs and their series count.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/status/tsdb
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/tsdb
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1305,8 +1375,11 @@ GET /api/v1/status/walreplay
|
|||
- **in progress**: The replay is in progress.
|
||||
- **done**: The replay has finished.
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/status/walreplay
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl http://localhost:9090/api/v1/status/walreplay
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1338,8 +1411,11 @@ URL query parameters:
|
|||
|
||||
- `skip_head=<bool>`: Skip data present in the head block. Optional.
|
||||
|
||||
```bash
|
||||
curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot
|
||||
```
|
||||
|
||||
```json
|
||||
$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/snapshot
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
|
@ -1371,8 +1447,8 @@ Not mentioning both start and end times would clear all the data for the matched
|
|||
|
||||
Example:
|
||||
|
||||
```json
|
||||
$ curl -X POST \
|
||||
```bash
|
||||
curl -X POST \
|
||||
-g 'http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up&match[]=process_start_time_seconds{job="prometheus"}'
|
||||
```
|
||||
|
||||
|
@ -1392,8 +1468,8 @@ PUT /api/v1/admin/tsdb/clean_tombstones
|
|||
|
||||
This takes no parameters or body.
|
||||
|
||||
```json
|
||||
$ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
|
||||
```bash
|
||||
curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
|
||||
```
|
||||
|
||||
*New in v2.1 and supports PUT from v2.9*
|
||||
|
@ -1451,8 +1527,11 @@ GET /api/v1/notifications
|
|||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/notifications
|
||||
```
|
||||
|
||||
```
|
||||
$ curl http://localhost:9090/api/v1/notifications
|
||||
{
|
||||
"status": "success",
|
||||
"data": [
|
||||
|
@ -1477,8 +1556,11 @@ GET /api/v1/notifications/live
|
|||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl http://localhost:9090/api/v1/notifications/live
|
||||
```
|
||||
|
||||
```
|
||||
$ curl http://localhost:9090/api/v1/notifications/live
|
||||
data: {
|
||||
"status": "success",
|
||||
"data": [
|
||||
|
|
|
Note that a limitation of local storage is that it is not clustered or
replicated. Thus, it is not arbitrarily scalable or durable in the face of
drive or node outages and should be managed like any other single node
database.

[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
made without snapshots run the risk of losing data that was recorded since
the last WAL sync, which typically happens every two hours. With proper
architecture, it is possible to retain years of data in local storage.
For further details on file format, see [TSDB format](/tsdb/docs/format/README.md).

### Compaction

The initial two-hour blocks are eventually compacted into longer blocks in the background.

Compaction will create larger blocks containing data spanning up to 10% of the retention time,
or 31 days, whichever is smaller.
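For example, with a 90-day retention time, 10% works out to nine days, which is below the 31-day cap, so the largest compacted blocks would span roughly nine days; with a one-year retention time the 31-day cap applies instead.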
### Operational aspects

Prometheus has several flags that configure local storage. The most important are:
Expired block cleanup happens in the background. It may take up to two hours
to remove expired blocks. Blocks must be fully expired before they are removed.

### Right-Sizing Retention Size

If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
will want to consider the right size for this value relative to the storage you
have allocated for Prometheus. It is wise to reduce the retention size to provide
a buffer, ensuring that older entries will be removed before the allocated storage
for Prometheus becomes full.

At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entries
will be removed prior to hitting any disk limitations.
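As a concrete illustration of that guidance: with 500GiB of disk allocated to Prometheus, a retention size of roughly 400-425GiB (80-85%) leaves enough headroom for older blocks to be removed before the volume fills up.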
## Remote storage integrations
@ -51,22 +51,22 @@ require (
|
|||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.30.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.124.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.31.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.125.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
|
||||
go.opentelemetry.io/otel v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.35.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/crypto v0.37.0 // indirect
|
||||
golang.org/x/net v0.39.0 // indirect
|
||||
golang.org/x/oauth2 v0.26.0 // indirect
|
||||
golang.org/x/sys v0.32.0 // indirect
|
||||
golang.org/x/text v0.24.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
|
||||
google.golang.org/grpc v1.71.1 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
|
||||
google.golang.org/grpc v1.72.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
|
|
@ -41,8 +41,8 @@ github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvF
|
|||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q=
|
||||
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
|
@ -318,10 +318,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/pdata v1.30.0 h1:j3jyq9um436r6WzWySzexP2nLnFdmL5uVBYAlyr9nDM=
|
||||
go.opentelemetry.io/collector/pdata v1.30.0/go.mod h1:0Bxu1ktuj4wE7PIASNSvd0SdBscQ1PLtYasymJ13/Cs=
|
||||
go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
|
||||
go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
|
||||
go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU=
|
||||
go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk=
|
||||
go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s=
|
||||
go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
|
@ -344,8 +344,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
|
|||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
|
@ -365,20 +365,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
|
||||
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -394,17 +394,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
|
||||
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -419,12 +419,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
|
||||
google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
|
||||
google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
|
||||
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
|
||||
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
|
|
@ -145,7 +145,7 @@ func parseFlags() *config {

	_, err := a.Parse(os.Args[1:])
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s", err)
		a.Usage(os.Args[1:])
		os.Exit(2)
	}
@ -78,7 +78,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) {

		case b == ':':
			result.WriteString("_.")
		default:
			fmt.Fprintf(result, "_%X", b)
		}
	}
	result.WriteByte('"')
go.mod
|
@ -15,7 +15,7 @@ require (
|
|||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/dennwc/varint v1.0.0
|
||||
github.com/digitalocean/godo v1.144.0
|
||||
github.com/digitalocean/godo v1.145.0
|
||||
github.com/docker/docker v28.1.1+incompatible
|
||||
github.com/edsrzf/mmap-go v1.2.0
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4
|
||||
|
@ -34,7 +34,7 @@ require (
|
|||
github.com/hashicorp/consul/api v1.32.0
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.21.0
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.3
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.4
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/klauspost/compress v1.18.0
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
|
||||
|
@ -45,7 +45,7 @@ require (
|
|||
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/oklog/ulid/v2 v2.1.0
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.125.0
|
||||
github.com/ovh/go-ovh v1.7.0
|
||||
github.com/prometheus/alertmanager v0.28.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
|
@ -58,11 +58,11 @@ require (
|
|||
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/vultr/govultr/v2 v2.17.2
|
||||
go.opentelemetry.io/collector/component v1.30.0
|
||||
go.opentelemetry.io/collector/consumer v1.30.0
|
||||
go.opentelemetry.io/collector/pdata v1.30.0
|
||||
go.opentelemetry.io/collector/processor v1.30.0
|
||||
go.opentelemetry.io/collector/semconv v0.124.0
|
||||
go.opentelemetry.io/collector/component v1.31.0
|
||||
go.opentelemetry.io/collector/consumer v1.31.0
|
||||
go.opentelemetry.io/collector/pdata v1.31.0
|
||||
go.opentelemetry.io/collector/processor v1.31.0
|
||||
go.opentelemetry.io/collector/semconv v0.125.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
|
||||
go.opentelemetry.io/otel v1.35.0
|
||||
|
@ -80,7 +80,7 @@ require (
|
|||
golang.org/x/sync v0.13.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/text v0.24.0
|
||||
google.golang.org/api v0.230.0
|
||||
google.golang.org/api v0.231.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb
|
||||
google.golang.org/grpc v1.72.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
|
@ -97,14 +97,14 @@ require (
|
|||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
go.opentelemetry.io/collector/featuregate v1.30.0 // indirect
|
||||
go.opentelemetry.io/collector/internal/telemetry v0.124.0 // indirect
|
||||
go.opentelemetry.io/collector/featuregate v1.31.0 // indirect
|
||||
go.opentelemetry.io/collector/internal/telemetry v0.125.0 // indirect
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect
|
||||
go.opentelemetry.io/otel/log v0.11.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.16.0 // indirect
|
||||
cloud.google.com/go/auth v0.16.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
|
||||
|
@ -162,8 +162,8 @@ require (
|
|||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/knadh/koanf/maps v0.1.2 // indirect
|
||||
github.com/knadh/koanf/providers/confmap v0.1.0 // indirect
|
||||
github.com/knadh/koanf/v2 v2.1.2 // indirect
|
||||
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
|
||||
github.com/knadh/koanf/v2 v2.2.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
|
@ -180,8 +180,8 @@ require (
|
|||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.125.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.125.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
|
@ -197,9 +197,9 @@ require (
|
|||
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.30.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect
|
||||
go.opentelemetry.io/collector/pipeline v0.124.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap v1.31.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.125.0 // indirect
|
||||
go.opentelemetry.io/collector/pipeline v0.125.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.37.0 // indirect
|
||||
|
@ -209,7 +209,7 @@ require (
|
|||
golang.org/x/term v0.31.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.32.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
|
|
go.sum
|
@ -1,5 +1,5 @@
|
|||
cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU=
|
||||
cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
|
||||
cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU=
|
||||
cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
|
||||
|
@ -80,8 +80,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
|||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0=
|
||||
github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
|
||||
github.com/digitalocean/godo v1.145.0 h1:xBhWr+vCBy7GsexCUsWC+dKhPAWBMRLazavvXwyPBp8=
|
||||
github.com/digitalocean/godo v1.145.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
|
@ -253,8 +253,8 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY
|
|||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.4 h1:jTvGl4LOF8v8OYoEIBNVwbFoqSGAFqn6vGE7sp7/BqQ=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.4/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48=
|
||||
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
|
||||
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
|
@ -280,10 +280,10 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
|
|||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
|
||||
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
|
||||
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
|
||||
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
|
||||
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
|
||||
github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
|
||||
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
|
||||
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
|
||||
github.com/knadh/koanf/v2 v2.2.0 h1:FZFwd9bUjpb8DyCWARUBy5ovuhDs1lI87dOEn2K8UVU=
|
||||
github.com/knadh/koanf/v2 v2.2.0/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -373,14 +373,14 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
|
|||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.125.0 h1:xNTSTF+Z0Vn3Nt2aUJ5JrJUUsrDA4l+oy2hVIGhPHxc=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.125.0/go.mod h1:wBz+TYCFKo0gZtIxORKtTKaUZqTJFTZh/bkyQ9tUqMg=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.125.0 h1:IzhLlqlwxWM0PcGeyq6ispujXRTyzeA37LNtcQHOvdg=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.125.0/go.mod h1:/WDZg8/Uk2niDeFWkijYvWkQ9gaRF0Vkj/RxGDRcMEY=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.125.0 h1:ZzDmvZcWi59c4gZLlkV+NbzDseuFNPePhgZ8XoZqfAI=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.125.0/go.mod h1:Hulx7f7AcWKM7crzT0HKxubNqN4qMF8wGyrC3W0BIYc=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.125.0 h1:nuXuleB2L/E8nctDbyRWKGv3DlAggzc4mtnQKf291PY=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.125.0/go.mod h1:AFs92cGgB/uaKbX48kuI7eawXr6eG93sCMvaCV5a/yw=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
|
@ -488,42 +488,42 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
|
|||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/component v1.30.0 h1:HXjqBHaQ47/EEuWdnkjr4Y3kRWvmyWIDvqa1Q262Fls=
|
||||
go.opentelemetry.io/collector/component v1.30.0/go.mod h1:vfM9kN+BM6oHBXWibquiprz8CVawxd4/aYy3nbhme3E=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.124.0/go.mod h1:NQ4ATOzMFc7QA06B993tq8o27DR0cu/JR/zK7slGJ3E=
|
||||
go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM=
|
||||
go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0=
|
||||
go.opentelemetry.io/collector/consumer v1.30.0 h1:Nn6kFTH+EJbv13E0W+sNvWrTgbiFCRv8f6DaA2F1DQs=
|
||||
go.opentelemetry.io/collector/consumer v1.30.0/go.mod h1:edRyfk61ugdhCQ93PBLRZfYMVWjdMPpKP8z5QLyESf0=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.124.0 h1:2arChG4RPrHW3lfVWlK/KDF7Y7qkUm/YAiBXh8oTue0=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.124.0/go.mod h1:Hlu+EXbINHxVAyIT1baKO2d0j5odR3fLlLAiaP+JqQg=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.124.0 h1:/cut96EWVNoz6lIeGI9+EzS6UClMtnZkx5YIpkD0Xe0=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.124.0/go.mod h1:fHH/MpzFCRNk/4foiYE6BoXQCAMf5sJTO35uvzVrrd4=
|
||||
go.opentelemetry.io/collector/featuregate v1.30.0 h1:mx7+iP/FQnY7KO8qw/xE3Qd1MQkWcU8VgcqLNrJ8EU8=
|
||||
go.opentelemetry.io/collector/featuregate v1.30.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
|
||||
go.opentelemetry.io/collector/internal/telemetry v0.124.0 h1:kzd1/ZYhLj4bt2pDB529mL4rIRrRacemXodFNxfhdWk=
|
||||
go.opentelemetry.io/collector/internal/telemetry v0.124.0/go.mod h1:ZjXjqV0dJ+6D4XGhTOxg/WHjnhdmXsmwmUSgALea66Y=
|
||||
go.opentelemetry.io/collector/pdata v1.30.0 h1:j3jyq9um436r6WzWySzexP2nLnFdmL5uVBYAlyr9nDM=
|
||||
go.opentelemetry.io/collector/pdata v1.30.0/go.mod h1:0Bxu1ktuj4wE7PIASNSvd0SdBscQ1PLtYasymJ13/Cs=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.124.0 h1:ZjL9wKqzP4BHj0/F1jfGxs1Va8B7xmYayipZeNVoWJE=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.124.0/go.mod h1:1EN3Gw5LSI4fSVma/Yfv/6nqeuYgRTm1/kmG5nE5Oyo=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o=
|
||||
go.opentelemetry.io/collector/pipeline v0.124.0 h1:hKvhDyH2GPnNO8LGL34ugf36sY7EOXPjBvlrvBhsOdw=
|
||||
go.opentelemetry.io/collector/pipeline v0.124.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
|
||||
go.opentelemetry.io/collector/processor v1.30.0 h1:dxmu+sO6MzQydyrf2CON5Hm1KU7yV4ofH1stmreUtPk=
|
||||
go.opentelemetry.io/collector/processor v1.30.0/go.mod h1:DjXAgelT8rfIWCTJP5kiPpxPqz4JLE1mJwsE2kJMTk8=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI=
|
||||
go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA=
|
||||
go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
|
||||
go.opentelemetry.io/collector/component v1.31.0 h1:9LzU8X1RhV3h8/QsAoTX23aFUfoJ3EUc9O/vK+hFpSI=
|
||||
go.opentelemetry.io/collector/component v1.31.0/go.mod h1:JbZl/KywXJxpUXPbt96qlEXJSym1zQ2hauMxYMuvlxM=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.125.0 h1:zlxGQZYd9kknRZSjRpOYW5SBjl0a5zYFYRPbreobXoU=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.125.0/go.mod h1:bHXc2W8bqqo9adOvCgvhcO7pYzJOSpyV4cuQ1wiIl04=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.125.0 h1:E2mpnMQbkMpYoZ3Q8pHx4kod7kedjwRs1xqDpzCe/84=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.125.0/go.mod h1:pQtsE1u/SPZdTphP5BZP64XbjXSq6wc+mDut5Ws/JDI=
|
||||
go.opentelemetry.io/collector/confmap v1.31.0 h1:+AW5VJc1rCtgEyGd+1J5uSNw/kVZ98+lKO/pqXEwVvU=
|
||||
go.opentelemetry.io/collector/confmap v1.31.0/go.mod h1:TdutQlIoHDPXcZ2xZ0QWGRkSFC8oTKO61zTx569dvrY=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.125.0 h1:Y0LPtz+xgtRYVAk2gZmvnBROEJj8C3YDiFPj5URbsX8=
|
||||
go.opentelemetry.io/collector/confmap/xconfmap v0.125.0/go.mod h1:8hNqCMs9Gzahh4W1h5XWOrQ+bE6NfP13WAggNyExJJs=
|
||||
go.opentelemetry.io/collector/consumer v1.31.0 h1:L+y66ywxLHnAxnUxv0JDwUf5bFj53kMxCCyEfRKlM7s=
|
||||
go.opentelemetry.io/collector/consumer v1.31.0/go.mod h1:rPsqy5ni+c6xNMUkOChleZYO/nInVY6eaBNZ1FmWJVk=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.125.0 h1:TUkxomGS4DAtjBvcWQd2UY4FDLLEKMQD6iOIDUr/5dM=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.125.0/go.mod h1:vkHf3y85cFLDHARO/cTREVjLjOPAV+cQg7lkC44DWOY=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.125.0 h1:oTreUlk1KpMSWwuHFnstW+orrjGTyvs2xd3o/Dpy+hI=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.125.0/go.mod h1:FX0G37r0W+wXRgxxFtwEJ4rlsCB+p0cIaxtU3C4hskw=
|
||||
go.opentelemetry.io/collector/featuregate v1.31.0 h1:20q7plPQZwmAiaYAa6l1m/i2qDITZuWlhjr4EkmeQls=
|
||||
go.opentelemetry.io/collector/featuregate v1.31.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
|
||||
go.opentelemetry.io/collector/internal/telemetry v0.125.0 h1:6lcGOxw3dAg7LfXTKdN8ZjR+l7KvzLdEiPMhhLwG4r4=
|
||||
go.opentelemetry.io/collector/internal/telemetry v0.125.0/go.mod h1:5GyFslLqjZgq1DZTtFiluxYhhXrCofHgOOOybodDPGE=
|
||||
go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU=
|
||||
go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.125.0 h1:Qqlx8w1HpiYZ9RQqjmMQIysI0cHNO1nh3E/fCTeFysA=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.125.0/go.mod h1:p/yK023VxAp8hm27/1G5DPTcMIpnJy3cHGAFUQZGyaQ=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.125.0 h1:due1Hl0EEVRVwfCkiamRy5E8lS6yalv0lo8Zl/SJtGw=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.125.0/go.mod h1:1GpEWlgdMrd+fWsBk37ZC2YmOP5YU3gFQ4rWuCu9g24=
|
||||
go.opentelemetry.io/collector/pipeline v0.125.0 h1:oitBgcAFqntDB4ihQJUHJSQ8IHqKFpPkaTVbTYdIUzM=
|
||||
go.opentelemetry.io/collector/pipeline v0.125.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
|
||||
go.opentelemetry.io/collector/processor v1.31.0 h1:+u7sBUpnCBsHYoALp4hfr9VEjLHHYa4uKENGITe0K9Q=
|
||||
go.opentelemetry.io/collector/processor v1.31.0/go.mod h1:5hDYJ7/hTdfd2tF2Rj5Hs6+mfyFz2O7CaPzVvW1qHQc=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.125.0 h1:ZVAN4iZPDcWhpzKqnuok2NIuS5hwGVVQUOWkJFR12tA=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.125.0/go.mod h1:VAw0IRG35cWTBjBtreXeXJEgqkRegfjrH/EuLhNX2+I=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.125.0 h1:VWYPMW1VmDq6xB7M5SYjBpQCCIq3MhQ3W++wU47QpZM=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.125.0/go.mod h1:bCxUyFVlksANg8wjYZqWVsRB33lkLQ294rTrju/IZiM=
|
||||
go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s=
|
||||
go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU=
|
||||
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM=
|
||||
|
@ -645,12 +645,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM=
|
||||
google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ=
|
||||
google.golang.org/api v0.231.0 h1:LbUD5FUl0C4qwia2bjXhCMH65yz1MLPzA/0OYEsYY7Q=
|
||||
google.golang.org/api v0.231.0/go.mod h1:H52180fPI/QQlUc0F4xWfGZILdv09GCWKt2bcsn164A=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 h1:29cjnHVylHwTzH66WfFZqgSQgnxzvWE+jvBwpZCLRxY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM=
|
||||
google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
|
|
|
@ -1016,7 +1016,7 @@ type floatBucketIterator struct {
|
|||
|
||||
func (i *floatBucketIterator) At() Bucket[float64] {
|
||||
// Need to use i.targetSchema rather than i.baseBucketIterator.schema.
|
||||
return i.baseBucketIterator.at(i.targetSchema)
|
||||
return i.at(i.targetSchema)
|
||||
}
|
||||
|
||||
func (i *floatBucketIterator) Next() bool {
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !stringlabels && !dedupelabels
|
||||
//go:build slicelabels
|
||||
|
||||
package labels
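The constraint on this file flips from !stringlabels && !dedupelabels to an explicit slicelabels tag, and the string-packed file further down flips the other way, so (judging only from the two tag lines in this diff) the slice-backed Labels implementation becomes opt-in while the string-packed one compiles by default. A minimal, hypothetical sketch of how such a guarded file behaves; the type definitions below are illustrative stand-ins, not the real package:

//go:build slicelabels

// This file only takes part in the build when the tag is passed,
// e.g. `go build -tags slicelabels ./...` or `go test -tags slicelabels ./...`.
// Without any tag, the !slicelabels && !dedupelabels variant is compiled instead.
package labels

// Label and Labels here stand in for the plain slice-of-pairs form.
type Label struct{ Name, Value string }

type Labels []Label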
@ -32,8 +32,8 @@ func (ls Labels) Len() int { return len(ls) }
|
|||
func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] }
|
||||
func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
|
||||
|
||||
// Bytes returns ls as a byte slice.
|
||||
// It uses an byte invalid character as a separator and so should not be used for printing.
|
||||
// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key.
|
||||
// Encoding may change over time or between runs of Prometheus.
|
||||
func (ls Labels) Bytes(buf []byte) []byte {
|
||||
b := bytes.NewBuffer(buf[:0])
|
||||
b.WriteByte(labelSep)
|
||||
|
@ -453,7 +453,7 @@ func (b *ScratchBuilder) Add(name, value string) {
|
|||
}
|
||||
|
||||
// UnsafeAddBytes adds a name/value pair, using []byte instead of string.
|
||||
// The '-tags stringlabels' version of this function is unsafe, hence the name.
|
||||
// The default version of this function is unsafe, hence the name.
|
||||
// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
|
||||
func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
|
||||
b.add = append(b.add, Label{Name: string(name), Value: string(value)})
|
||||
|
|
|
@ -167,10 +167,8 @@ func (b *Builder) Del(ns ...string) *Builder {
|
|||
// Keep removes all labels from the base except those with the given names.
|
||||
func (b *Builder) Keep(ns ...string) *Builder {
|
||||
b.base.Range(func(l Label) {
|
||||
for _, n := range ns {
|
||||
if l.Name == n {
|
||||
return
|
||||
}
|
||||
if slices.Contains(ns, l.Name) {
|
||||
return
|
||||
}
|
||||
b.del = append(b.del, l.Name)
|
||||
})
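The manual membership loop in Builder.Keep collapses into slices.Contains from the standard library (available since Go 1.21). A small self-contained sketch of the same pattern, using a hypothetical keepOnly helper rather than the Builder itself:

package main

import (
	"fmt"
	"slices"
)

// keepOnly returns the names from base that are also listed in keep,
// mirroring the membership test Keep now delegates to slices.Contains.
func keepOnly(base, keep []string) []string {
	var out []string
	for _, name := range base {
		if slices.Contains(keep, name) { // replaces the hand-rolled for/if loop
			out = append(out, name)
		}
	}
	return out
}

func main() {
	base := []string{"__name__", "job", "instance", "env"}
	fmt.Println(keepOnly(base, []string{"job", "env"})) // [job env]
}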
@ -140,8 +140,8 @@ func decodeString(t *nameTable, data string, index int) (string, int) {
|
|||
return t.ToName(num), index
|
||||
}
|
||||
|
||||
// Bytes returns ls as a byte slice.
|
||||
// It uses non-printing characters and so should not be used for printing.
|
||||
// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key.
|
||||
// Encoding may change over time or between runs of Prometheus.
|
||||
func (ls Labels) Bytes(buf []byte) []byte {
|
||||
b := bytes.NewBuffer(buf[:0])
|
||||
for i := 0; i < len(ls.data); {
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build stringlabels
|
||||
//go:build !slicelabels && !dedupelabels
|
||||
|
||||
package labels
|
||||
|
||||
|
@ -24,31 +24,25 @@ import (
|
|||
)
|
||||
|
||||
// Labels is implemented by a single flat string holding name/value pairs.
|
||||
// Each name and value is preceded by its length in varint encoding.
|
||||
// Each name and value is preceded by its length, encoded as a single byte
|
||||
// for size 0-254, or the following 3 bytes little-endian, if the first byte is 255.
|
||||
// Maximum length allowed is 2^24 or 16MB.
|
||||
// Names are in order.
|
||||
type Labels struct {
|
||||
data string
|
||||
}
|
||||
|
||||
func decodeSize(data string, index int) (int, int) {
|
||||
// Fast-path for common case of a single byte, value 0..127.
|
||||
b := data[index]
|
||||
index++
|
||||
if b < 0x80 {
|
||||
return int(b), index
|
||||
}
|
||||
size := int(b & 0x7F)
|
||||
for shift := uint(7); ; shift += 7 {
|
||||
if b == 255 {
|
||||
// Larger numbers are encoded as 3 bytes little-endian.
|
||||
// Just panic if we go off the end of data, since all Labels strings are constructed internally and
|
||||
// malformed data indicates a bug, or memory corruption.
|
||||
b := data[index]
|
||||
index++
|
||||
size |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
return int(data[index]) + (int(data[index+1]) << 8) + (int(data[index+2]) << 16), index + 3
|
||||
}
|
||||
return size, index
|
||||
// More common case of a single byte, value 0..254.
|
||||
return int(b), index
|
||||
}
|
||||
|
||||
func decodeString(data string, index int) (string, int) {
|
||||
|
@ -57,8 +51,8 @@ func decodeString(data string, index int) (string, int) {
|
|||
return data[index : index+size], index + size
|
||||
}
|
||||
|
||||
// Bytes returns ls as a byte slice.
|
||||
// It uses non-printing characters and so should not be used for printing.
|
||||
// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key.
|
||||
// Encoding may change over time or between runs of Prometheus.
|
||||
func (ls Labels) Bytes(buf []byte) []byte {
|
||||
if cap(buf) < len(ls.data) {
|
||||
buf = make([]byte, len(ls.data))
|
||||
|
@ -76,7 +70,7 @@ func (ls Labels) IsZero() bool {
|
|||
|
||||
// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
|
||||
// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
|
||||
// TODO: This is only used in printing an error message
|
||||
// TODO: This is only used in printing an error message.
|
||||
func (ls Labels) MatchLabels(on bool, names ...string) Labels {
|
||||
b := NewBuilder(ls)
|
||||
if on {
|
||||
|
@ -298,6 +292,7 @@ func Equal(ls, o Labels) bool {
|
|||
func EmptyLabels() Labels {
|
||||
return Labels{}
|
||||
}
|
||||
|
||||
func yoloBytes(s string) []byte {
|
||||
return unsafe.Slice(unsafe.StringData(s), len(s))
|
||||
}
|
||||
|
@ -370,7 +365,7 @@ func Compare(a, b Labels) int {
|
|||
return +1
|
||||
}
|
||||
|
||||
// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
|
||||
// CopyFrom will copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
|
||||
func (ls *Labels) CopyFrom(b Labels) {
|
||||
ls.data = b.data // strings are immutable
|
||||
}
|
||||
|
@ -440,11 +435,11 @@ func (ls Labels) DropMetricName() Labels {
|
|||
}
|
||||
|
||||
// InternStrings is a no-op because it would only save when the whole set of labels is identical.
|
||||
func (ls *Labels) InternStrings(intern func(string) string) {
|
||||
func (ls *Labels) InternStrings(_ func(string) string) {
|
||||
}
|
||||
|
||||
// ReleaseStrings is a no-op for the same reason as InternStrings.
|
||||
func (ls Labels) ReleaseStrings(release func(string)) {
|
||||
func (ls Labels) ReleaseStrings(_ func(string)) {
|
||||
}
|
||||
|
||||
// Builder allows modifying Labels.
|
||||
|
@ -527,48 +522,27 @@ func marshalLabelToSizedBuffer(m *Label, data []byte) int {
|
|||
return len(data) - i
|
||||
}
|
||||
|
||||
func sizeVarint(x uint64) (n int) {
|
||||
// Most common case first
|
||||
if x < 1<<7 {
|
||||
func sizeWhenEncoded(x uint64) (n int) {
|
||||
if x < 255 {
|
||||
return 1
|
||||
} else if x <= 1<<24 {
|
||||
return 4
|
||||
}
|
||||
if x >= 1<<56 {
|
||||
return 9
|
||||
}
|
||||
if x >= 1<<28 {
|
||||
x >>= 28
|
||||
n = 4
|
||||
}
|
||||
if x >= 1<<14 {
|
||||
x >>= 14
|
||||
n += 2
|
||||
}
|
||||
if x >= 1<<7 {
|
||||
n++
|
||||
}
|
||||
return n + 1
|
||||
panic("String too long to encode as label.")
|
||||
}
|
||||
|
||||
func encodeVarint(data []byte, offset int, v uint64) int {
|
||||
offset -= sizeVarint(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
|
||||
// Special code for the common case that a size is less than 128
|
||||
func encodeSize(data []byte, offset, v int) int {
|
||||
if v < 1<<7 {
|
||||
if v < 255 {
|
||||
offset--
|
||||
data[offset] = uint8(v)
|
||||
return offset
|
||||
}
|
||||
return encodeVarint(data, offset, uint64(v))
|
||||
offset -= 4
|
||||
data[offset] = 255
|
||||
data[offset+1] = byte(v)
|
||||
data[offset+2] = byte((v >> 8))
|
||||
data[offset+3] = byte((v >> 16))
|
||||
return offset
|
||||
}
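The length prefix for stringlabels moves from protobuf-style varints to the simpler scheme described in the comment above: one byte for sizes 0 to 254, otherwise a 255 marker followed by three little-endian bytes, which caps a single name or value at 2^24. A standalone round-trip sketch of that scheme (hypothetical helpers, not the package's own encodeSize/decodeSize):

package main

import "fmt"

// putSize appends a length: 0-254 as a single byte, otherwise 0xFF
// followed by three little-endian bytes.
func putSize(dst []byte, v int) []byte {
	if v < 255 {
		return append(dst, byte(v))
	}
	return append(dst, 255, byte(v), byte(v>>8), byte(v>>16))
}

// getSize decodes one length and returns it together with the next offset.
func getSize(data []byte, i int) (int, int) {
	if b := data[i]; b < 255 {
		return int(b), i + 1
	}
	return int(data[i+1]) | int(data[i+2])<<8 | int(data[i+3])<<16, i + 4
}

func main() {
	var buf []byte
	for _, n := range []int{0, 7, 254, 255, 70000} {
		buf = putSize(buf, n)
	}
	for i := 0; i < len(buf); {
		var n int
		n, i = getSize(buf, i)
		fmt.Println(n) // 0, 7, 254, 255, 70000
	}
}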
|
||||
func labelsSize(lbls []Label) (n int) {
|
||||
|
@ -582,9 +556,9 @@ func labelsSize(lbls []Label) (n int) {
|
|||
func labelSize(m *Label) (n int) {
|
||||
// strings are encoded as length followed by contents.
|
||||
l := len(m.Name)
|
||||
n += l + sizeVarint(uint64(l))
|
||||
n += l + sizeWhenEncoded(uint64(l))
|
||||
l = len(m.Value)
|
||||
n += l + sizeVarint(uint64(l))
|
||||
n += l + sizeWhenEncoded(uint64(l))
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -630,7 +604,7 @@ func (b *ScratchBuilder) Add(name, value string) {
|
|||
b.add = append(b.add, Label{Name: name, Value: value})
|
||||
}
|
||||
|
||||
// Add a name/value pair, using []byte instead of string to reduce memory allocations.
|
||||
// UnsafeAddBytes adds a name/value pair using []byte instead of string to reduce memory allocations.
|
||||
// The values must remain live until Labels() is called.
|
||||
func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
|
||||
b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)})
|
||||
|
@ -658,7 +632,7 @@ func (b *ScratchBuilder) Labels() Labels {
|
|||
return b.output
|
||||
}
|
||||
|
||||
// Write the newly-built Labels out to ls, reusing an internal buffer.
|
||||
// Overwrite will write the newly-built Labels out to ls, reusing an internal buffer.
|
||||
// Callers must ensure that there are no other references to ls, or any strings fetched from it.
|
||||
func (b *ScratchBuilder) Overwrite(ls *Labels) {
|
||||
size := labelsSize(b.add)
|
||||
|
@ -671,7 +645,7 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) {
|
|||
ls.data = yoloString(b.overwriteBuffer)
|
||||
}
|
||||
|
||||
// Symbol-table is no-op, just for api parity with dedupelabels.
|
||||
// SymbolTable is no-op, just for api parity with dedupelabels.
|
||||
type SymbolTable struct{}
|
||||
|
||||
func NewSymbolTable() *SymbolTable { return nil }
|
||||
|
|
|
@ -27,6 +27,8 @@ import (
|
|||
)
|
||||
|
||||
func TestLabels_String(t *testing.T) {
|
||||
s254 := strings.Repeat("x", 254) // Edge cases for stringlabels encoding.
|
||||
s255 := strings.Repeat("x", 255)
|
||||
cases := []struct {
|
||||
labels Labels
|
||||
expected string
|
||||
|
@ -43,6 +45,14 @@ func TestLabels_String(t *testing.T) {
|
|||
labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
|
||||
expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`,
|
||||
},
|
||||
{
|
||||
labels: FromStrings("aaa", "111", "xx", s254),
|
||||
expected: `{aaa="111", xx="` + s254 + `"}`,
|
||||
},
|
||||
{
|
||||
labels: FromStrings("aaa", "111", "xx", s255),
|
||||
expected: `{aaa="111", xx="` + s255 + `"}`,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
str := c.labels.String()
|
||||
|
@ -503,7 +513,7 @@ func TestLabels_Has(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLabels_Get(t *testing.T) {
|
||||
require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo"))
|
||||
require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo"))
|
||||
require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa"))
|
||||
require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb"))
|
||||
}
|
||||
|
|
|
@ -95,12 +95,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
|
|||
|
||||
return func(s string) bool {
|
||||
if len(m.setMatches) != 0 {
|
||||
for _, match := range m.setMatches {
|
||||
if match == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.Contains(m.setMatches, s)
|
||||
}
|
||||
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
|
||||
return false
|
||||
|
@ -771,16 +766,11 @@ func (m *equalMultiStringSliceMatcher) setMatches() []string {
|
|||
|
||||
func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
|
||||
if m.caseSensitive {
|
||||
for _, v := range m.values {
|
||||
if s == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, v := range m.values {
|
||||
if strings.EqualFold(s, v) {
|
||||
return true
|
||||
}
|
||||
return slices.Contains(m.values, s)
|
||||
}
|
||||
for _, v := range m.values {
|
||||
if strings.EqualFold(s, v) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
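Only the case-sensitive branch can be replaced by slices.Contains, because Contains compares elements with ==; the case-insensitive branch keeps its explicit strings.EqualFold loop (slices.ContainsFunc with EqualFold would be the alternative). A tiny illustration with made-up values:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// containsFold reports whether any element equals s ignoring case,
// the behaviour the case-insensitive branch above still needs a loop for.
func containsFold(vals []string, s string) bool {
	for _, v := range vals {
		if strings.EqualFold(v, s) {
			return true
		}
	}
	return false
}

func main() {
	vals := []string{"GET", "POST"}
	fmt.Println(slices.Contains(vals, "get")) // false: exact comparison only
	fmt.Println(containsFold(vals, "get"))    // true: Unicode case folding
}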
@ -11,7 +11,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !stringlabels && !dedupelabels
|
||||
//go:build slicelabels
|
||||
|
||||
package labels
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build stringlabels
|
||||
//go:build !slicelabels && !dedupelabels
|
||||
|
||||
package labels
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ func (m *MetricStreamingDecoder) NextMetricFamily() error {
|
|||
m.mfData = b[varIntLength:totalLength]
|
||||
|
||||
m.inPos += totalLength
|
||||
return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData)
|
||||
return m.unmarshalWithoutMetrics(m, m.mfData)
|
||||
}
|
||||
|
||||
// resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory.
|
||||
|
@ -98,7 +98,7 @@ func (m *MetricStreamingDecoder) NextMetric() error {
|
|||
|
||||
m.resetMetric()
|
||||
m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end]
|
||||
if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil {
|
||||
if err := m.unmarshalWithoutLabels(m, m.mData); err != nil {
|
||||
return err
|
||||
}
|
||||
m.metricIndex++
|
||||
|
@ -111,37 +111,37 @@ func (m *MetricStreamingDecoder) resetMetric() {
|
|||
m.TimestampMs = 0
|
||||
|
||||
// TODO(bwplotka): Autogenerate reset functions.
|
||||
if m.Metric.Counter != nil {
|
||||
m.Metric.Counter.Value = 0
|
||||
m.Metric.Counter.CreatedTimestamp = nil
|
||||
m.Metric.Counter.Exemplar = nil
|
||||
if m.Counter != nil {
|
||||
m.Counter.Value = 0
|
||||
m.Counter.CreatedTimestamp = nil
|
||||
m.Counter.Exemplar = nil
|
||||
}
|
||||
if m.Metric.Gauge != nil {
|
||||
m.Metric.Gauge.Value = 0
|
||||
if m.Gauge != nil {
|
||||
m.Gauge.Value = 0
|
||||
}
|
||||
if m.Metric.Histogram != nil {
|
||||
m.Metric.Histogram.SampleCount = 0
|
||||
m.Metric.Histogram.SampleCountFloat = 0
|
||||
m.Metric.Histogram.SampleSum = 0
|
||||
m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0]
|
||||
m.Metric.Histogram.CreatedTimestamp = nil
|
||||
m.Metric.Histogram.Schema = 0
|
||||
m.Metric.Histogram.ZeroThreshold = 0
|
||||
m.Metric.Histogram.ZeroCount = 0
|
||||
m.Metric.Histogram.ZeroCountFloat = 0
|
||||
m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0]
|
||||
m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0]
|
||||
m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0]
|
||||
m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0]
|
||||
m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0]
|
||||
m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0]
|
||||
m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0]
|
||||
if m.Histogram != nil {
|
||||
m.Histogram.SampleCount = 0
|
||||
m.Histogram.SampleCountFloat = 0
|
||||
m.Histogram.SampleSum = 0
|
||||
m.Histogram.Bucket = m.Histogram.Bucket[:0]
|
||||
m.Histogram.CreatedTimestamp = nil
|
||||
m.Histogram.Schema = 0
|
||||
m.Histogram.ZeroThreshold = 0
|
||||
m.Histogram.ZeroCount = 0
|
||||
m.Histogram.ZeroCountFloat = 0
|
||||
m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0]
|
||||
m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0]
|
||||
m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0]
|
||||
m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0]
|
||||
m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0]
|
||||
m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0]
|
||||
m.Histogram.Exemplars = m.Histogram.Exemplars[:0]
|
||||
}
|
||||
if m.Metric.Summary != nil {
|
||||
m.Metric.Summary.SampleCount = 0
|
||||
m.Metric.Summary.SampleSum = 0
|
||||
m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0]
|
||||
m.Metric.Summary.CreatedTimestamp = nil
|
||||
if m.Summary != nil {
|
||||
m.Summary.SampleCount = 0
|
||||
m.Summary.SampleSum = 0
|
||||
m.Summary.Quantile = m.Summary.Quantile[:0]
|
||||
m.Summary.CreatedTimestamp = nil
|
||||
}
|
||||
}
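The m.Metric.Counter style selectors shrink to m.Counter because, per the call sites in these hunks, MetricStreamingDecoder embeds the Metric and MetricFamily types, so their fields and methods are promoted to the outer struct; it is the same object reached by a shorter path. A generic sketch of the promotion rule with hypothetical types:

package main

import "fmt"

type Counter struct{ Value float64 }

type Metric struct {
	Counter *Counter
}

// decoder embeds Metric, so Metric's fields are promoted:
// d.Counter and d.Metric.Counter name the same field.
type decoder struct {
	Metric
	pos int
}

func main() {
	d := decoder{Metric: Metric{Counter: &Counter{Value: 3}}, pos: 1}
	d.Counter.Value = 0 // identical to d.Metric.Counter.Value = 0
	fmt.Println(d.Metric.Counter.Value, d.pos) // 0 1
}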
@ -135,12 +135,12 @@ func TestToMetadata(t *testing.T) {
|
|||
|
||||
func TestToHistogram_Empty(t *testing.T) {
|
||||
t.Run("v1", func(t *testing.T) {
|
||||
require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
|
||||
require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
|
||||
require.NotNil(t, prompb.Histogram{}.ToIntHistogram())
|
||||
require.NotNil(t, prompb.Histogram{}.ToFloatHistogram())
|
||||
})
|
||||
t.Run("v2", func(t *testing.T) {
|
||||
require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
|
||||
require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
|
||||
require.NotNil(t, writev2.Histogram{}.ToIntHistogram())
|
||||
require.NotNil(t, writev2.Histogram{}.ToFloatHistogram())
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -89,8 +89,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
|
|||
}
|
||||
}
|
||||
|
||||
stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
|
||||
stor.DB.Compact(ctx)
|
||||
stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
|
||||
stor.Compact(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -269,7 +269,7 @@ func rangeQueryCases() []benchCase {
|
|||
|
||||
func BenchmarkRangeQuery(b *testing.B) {
|
||||
stor := teststorage.New(b)
|
||||
stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings.
|
||||
stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
|
||||
defer stor.Close()
|
||||
opts := promql.EngineOpts{
|
||||
Logger: nil,
|
||||
|
@ -498,8 +498,8 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
|
|||
require.NoError(tb, a.Commit())
|
||||
}
|
||||
|
||||
stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
|
||||
stor.DB.Compact(ctx)
|
||||
stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
|
||||
stor.Compact(ctx)
|
||||
}
|
||||
|
||||
func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {
|
||||
136 promql/engine.go
@ -731,7 +731,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
|
|||
setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
|
||||
evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
|
||||
// Instant evaluation. This is executed as a range evaluation with one step.
|
||||
if s.Start == s.End && s.Interval == 0 {
|
||||
if s.Start.Equal(s.End) && s.Interval == 0 {
|
||||
start := timeMilliseconds(s.Start)
|
||||
evaluator := &evaluator{
|
||||
startTimestamp: start,
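Comparing time.Time values with == compares the whole struct, including any monotonic clock reading and the Location, so two representations of the same instant can compare unequal; Equal compares the instant only, which is what the instant-query shortcut needs. A quick demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()               // carries a monotonic clock reading
	u := t.Round(0)               // same instant, monotonic reading stripped
	fmt.Println(t == u)           // false: operator compares the full value
	fmt.Println(t.Equal(u))       // true: Equal compares the instant only
	fmt.Println(t.Equal(t.UTC())) // true even though the Locations differ
}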
@ -1202,7 +1202,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
|
|||
mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F})
|
||||
}
|
||||
|
||||
for _, sample := range enh.nativeHistogramSamples {
|
||||
for idx, sample := range enh.nativeHistogramSamples {
|
||||
// We have to reconstruct the exact same signature as above for
|
||||
// a classic histogram, just ignoring any le label.
|
||||
enh.lblBuf = sample.Metric.Bytes(enh.lblBuf)
|
||||
|
@ -1212,6 +1212,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota
|
|||
// labels. Do not evaluate anything.
|
||||
annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange()))
|
||||
delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf))
|
||||
enh.nativeHistogramSamples[idx].H = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -1376,7 +1377,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label
|
|||
return mat, warnings
|
||||
}
|
||||
|
||||
func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) {
|
||||
func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, params *fParams) (Matrix, annotations.Annotations) {
|
||||
// Keep a copy of the original point slice so that it can be returned to the pool.
|
||||
origMatrix := slices.Clone(inputMatrix)
|
||||
defer func() {
|
||||
|
@ -1386,7 +1387,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
|
|||
}
|
||||
}()
|
||||
|
||||
var warnings annotations.Annotations
|
||||
var annos annotations.Annotations
|
||||
|
||||
enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval}
|
||||
tempNumSamples := ev.currentSamples
|
||||
|
@ -1416,46 +1417,43 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
|
|||
}
|
||||
groups := make([]groupedAggregation, groupCount)
|
||||
|
||||
var k int64
|
||||
var ratio float64
|
||||
var seriess map[uint64]Series
|
||||
|
||||
switch aggExpr.Op {
|
||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
|
||||
if !convertibleToInt64(param) {
|
||||
ev.errorf("Scalar value %v overflows int64", param)
|
||||
// Return early if all k values are less than one.
|
||||
if params.Max() < 1 {
|
||||
return nil, annos
|
||||
}
|
||||
k = int64(param)
|
||||
if k > int64(len(inputMatrix)) {
|
||||
k = int64(len(inputMatrix))
|
||||
}
|
||||
if k < 1 {
|
||||
return nil, warnings
|
||||
}
|
||||
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
|
||||
seriess = make(map[uint64]Series, len(inputMatrix))
|
||||
|
||||
case parser.LIMIT_RATIO:
|
||||
if math.IsNaN(param) {
|
||||
ev.errorf("Ratio value %v is NaN", param)
|
||||
// Return early if all r values are zero.
|
||||
if params.Max() == 0 && params.Min() == 0 {
|
||||
return nil, annos
|
||||
}
|
||||
switch {
|
||||
case param == 0:
|
||||
return nil, warnings
|
||||
case param < -1.0:
|
||||
ratio = -1.0
|
||||
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
|
||||
case param > 1.0:
|
||||
ratio = 1.0
|
||||
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
|
||||
default:
|
||||
ratio = param
|
||||
if params.Max() > 1.0 {
|
||||
annos.Add(annotations.NewInvalidRatioWarning(params.Max(), 1.0, aggExpr.Param.PositionRange()))
|
||||
}
|
||||
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
|
||||
if params.Min() < -1.0 {
|
||||
annos.Add(annotations.NewInvalidRatioWarning(params.Min(), -1.0, aggExpr.Param.PositionRange()))
|
||||
}
|
||||
seriess = make(map[uint64]Series, len(inputMatrix))
|
||||
|
||||
case parser.QUANTILE:
|
||||
if math.IsNaN(param) || param < 0 || param > 1 {
|
||||
warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange()))
|
||||
if params.HasAnyNaN() {
|
||||
annos.Add(annotations.NewInvalidQuantileWarning(math.NaN(), aggExpr.Param.PositionRange()))
|
||||
}
|
||||
if params.Max() > 1 {
|
||||
annos.Add(annotations.NewInvalidQuantileWarning(params.Max(), aggExpr.Param.PositionRange()))
|
||||
}
|
||||
if params.Min() < 0 {
|
||||
annos.Add(annotations.NewInvalidQuantileWarning(params.Min(), aggExpr.Param.PositionRange()))
|
||||
}
|
||||
}
|
||||
|
||||
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
|
||||
fParam := params.Next()
|
||||
if err := contextDone(ctx, "expression evaluation"); err != nil {
|
||||
ev.error(err)
|
||||
}
|
||||
|
@ -1467,17 +1465,17 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
|
|||
var ws annotations.Annotations
|
||||
switch aggExpr.Op {
|
||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO:
|
||||
result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess)
|
||||
result, ws = ev.aggregationK(aggExpr, fParam, inputMatrix, seriesToResult, groups, enh, seriess)
|
||||
// If this could be an instant query, shortcut so as not to change sort order.
|
||||
if ev.endTimestamp == ev.startTimestamp {
|
||||
warnings.Merge(ws)
|
||||
return result, warnings
|
||||
if ev.startTimestamp == ev.endTimestamp {
|
||||
annos.Merge(ws)
|
||||
return result, annos
|
||||
}
|
||||
default:
|
||||
ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh)
|
||||
ws = ev.aggregation(aggExpr, fParam, inputMatrix, result, seriesToResult, groups, enh)
|
||||
}
|
||||
|
||||
warnings.Merge(ws)
|
||||
annos.Merge(ws)
|
||||
|
||||
if ev.currentSamples > ev.maxSamples {
|
||||
ev.error(ErrTooManySamples(env))
|
||||
|
@ -1502,7 +1500,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
|
|||
}
|
||||
result = result[:dst]
|
||||
}
|
||||
return result, warnings
|
||||
return result, annos
|
||||
}
|
||||
|
||||
// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset.
|
||||
|
@ -1680,18 +1678,14 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
|
|||
var warnings annotations.Annotations
|
||||
originalNumSamples := ev.currentSamples
|
||||
// param is the number k for topk/bottomk, or q for quantile.
|
||||
var fParam float64
|
||||
if param != nil {
|
||||
val, ws := ev.eval(ctx, param)
|
||||
warnings.Merge(ws)
|
||||
fParam = val.(Matrix)[0].Floats[0].F
|
||||
}
|
||||
fp, ws := newFParams(ctx, ev, param)
|
||||
warnings.Merge(ws)
|
||||
// Now fetch the data to be aggregated.
|
||||
val, ws := ev.eval(ctx, e.Expr)
|
||||
warnings.Merge(ws)
|
||||
inputMatrix := val.(Matrix)
|
||||
|
||||
result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam)
|
||||
result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fp)
|
||||
warnings.Merge(ws)
|
||||
ev.currentSamples = originalNumSamples + result.TotalSamples()
|
||||
ev.samplesStats.UpdatePeak(ev.currentSamples)
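The aggregation parameter (k for topk/bottomk/limitk, the ratio for limit_ratio, the quantile) used to be evaluated once into a single float; newFParams, introduced by this change, evaluates it per step so the parameter may differ at each timestamp of a range query, with Max/Min/HasAnyNaN consulted up front for validation. The implementation is not shown in these hunks, so the following is only a guessed-at sketch of the shape implied by the call sites; stepParams and its values are hypothetical:

package main

import (
	"fmt"
	"math"
)

// stepParams stands in for the fParams value used above: one parameter per
// output step, consumed in order by Next().
type stepParams struct {
	vals []float64
	idx  int
}

func (p *stepParams) Next() float64 { v := p.vals[p.idx]; p.idx++; return v }

func (p *stepParams) Max() float64 {
	m := math.Inf(-1)
	for _, v := range p.vals {
		if v > m {
			m = v
		}
	}
	return m
}

func (p *stepParams) Min() float64 {
	m := math.Inf(1)
	for _, v := range p.vals {
		if v < m {
			m = v
		}
	}
	return m
}

func (p *stepParams) HasAnyNaN() bool {
	for _, v := range p.vals {
		if math.IsNaN(v) {
			return true
		}
	}
	return false
}

func main() {
	p := &stepParams{vals: []float64{2, 3, 1}} // e.g. k at each step of a range query
	fmt.Println(p.Max(), p.Min(), p.HasAnyNaN()) // 3 1 false
	for range p.vals {
		fmt.Print(p.Next(), " ") // 2 3 1
	}
	fmt.Println()
}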
@ -3268,7 +3262,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
|
|||
// seriesToResult maps inputMatrix indexes to groups indexes.
|
||||
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio.
|
||||
// For a range query, aggregates output in the seriess map.
|
||||
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
|
||||
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, fParam float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
|
||||
op := e.Op
|
||||
var s Sample
|
||||
var annos annotations.Annotations
|
||||
|
@ -3277,6 +3271,14 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, i
|
|||
for i := range groups {
|
||||
groups[i].seen = false
|
||||
}
|
||||
// advanceRemainingSeries discards any values at the current timestamp `ts`
|
||||
// for the remaining input series. In range queries, if these values are not
|
||||
// consumed now, they will no longer be accessible in the next evaluation step.
|
||||
advanceRemainingSeries := func(ts int64, startIdx int) {
|
||||
for i := startIdx; i < len(inputMatrix); i++ {
|
||||
_, _, _ = ev.nextValues(ts, &inputMatrix[i])
|
||||
}
|
||||
}
|
||||
|
||||
seriesLoop:
|
||||
for si := range inputMatrix {
|
||||
|
@ -3286,6 +3288,42 @@ seriesLoop:
|
|||
}
|
||||
s = Sample{Metric: inputMatrix[si].Metric, F: f, H: h, DropName: inputMatrix[si].DropName}
|
||||
|
||||
var k int64
|
||||
var r float64
|
||||
switch op {
|
||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
|
||||
if !convertibleToInt64(fParam) {
|
||||
ev.errorf("Scalar value %v overflows int64", fParam)
|
||||
}
|
||||
k = int64(fParam)
|
||||
if k > int64(len(inputMatrix)) {
|
||||
k = int64(len(inputMatrix))
|
||||
}
|
||||
if k < 1 {
|
||||
if enh.Ts != ev.endTimestamp {
|
||||
advanceRemainingSeries(enh.Ts, si+1)
|
||||
}
|
||||
return nil, annos
|
||||
}
|
||||
case parser.LIMIT_RATIO:
|
||||
if math.IsNaN(fParam) {
|
||||
ev.errorf("Ratio value %v is NaN", fParam)
|
||||
}
|
||||
switch {
|
||||
case fParam == 0:
|
||||
if enh.Ts != ev.endTimestamp {
|
||||
advanceRemainingSeries(enh.Ts, si+1)
|
||||
}
|
||||
return nil, annos
|
||||
case fParam < -1.0:
|
||||
r = -1.0
|
||||
case fParam > 1.0:
|
||||
r = 1.0
|
||||
default:
|
||||
r = fParam
|
||||
}
|
||||
}
|
||||
|
||||
group := &groups[seriesToResult[si]]
|
||||
// Initialize this group if it's the first time we've seen it.
|
||||
if !group.seen {
|
||||
|
@ -3376,6 +3414,10 @@ seriesLoop:
|
|||
group.groupAggrComplete = true
|
||||
groupsRemaining--
|
||||
if groupsRemaining == 0 {
|
||||
// Process other values in the series before breaking the loop in case of range query.
|
||||
if enh.Ts != ev.endTimestamp {
|
||||
advanceRemainingSeries(enh.Ts, si+1)
|
||||
}
|
||||
break seriesLoop
|
||||
}
|
||||
}
|
||||
|
|
|
@ -612,7 +612,6 @@ func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper
|
|||
|
||||
// === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) ===
|
||||
func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
vec := vals[0].(Vector)
|
||||
// round returns a number rounded to toNearest.
|
||||
// Ties are solved by rounding up.
|
||||
toNearest := float64(1)
|
||||
|
@ -621,23 +620,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
|
|||
}
|
||||
// Invert as it seems to cause fewer floating point accuracy issues.
|
||||
toNearestInverse := 1.0 / toNearest
|
||||
|
||||
for _, el := range vec {
|
||||
if el.H != nil {
|
||||
// Process only float samples.
|
||||
continue
|
||||
}
|
||||
f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
el.Metric = el.Metric.DropMetricName()
|
||||
}
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: el.Metric,
|
||||
F: f,
|
||||
DropName: true,
|
||||
})
|
||||
}
|
||||
return enh.Out, nil
|
||||
return simpleFloatFunc(vals, enh, func(f float64) float64 {
|
||||
return math.Floor(f*toNearestInverse+0.5) / toNearestInverse
|
||||
}), nil
|
||||
}
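round() now goes through simpleFloatFunc (the renamed simpleFunc), which applies a float-to-float callback to every float sample and skips native-histogram samples. A stripped-down sketch of that shape over a plain slice, with a hypothetical sample type standing in for promql's:

package main

import (
	"fmt"
	"math"
)

// sample is a stand-in: F holds a float value, a non-nil H marks a
// native-histogram sample that float-only functions must skip.
type sample struct {
	F float64
	H *struct{}
}

// mapFloats mirrors the simpleFloatFunc shape: apply f to float samples only.
func mapFloats(in []sample, f func(float64) float64) []sample {
	var out []sample
	for _, s := range in {
		if s.H == nil {
			out = append(out, sample{F: f(s.F)})
		}
	}
	return out
}

func main() {
	in := []sample{{F: 2.5}, {F: -1.2}, {H: &struct{}{}}}
	toNearest := 0.5
	inv := 1.0 / toNearest
	rounded := mapFloats(in, func(v float64) float64 {
		return math.Floor(v*inv+0.5) / inv // round() semantics, ties rounded up
	})
	fmt.Println(rounded) // [{2.5 <nil>} {-1 <nil>}]
}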
// === Scalar(node parser.ValueTypeVector) Scalar ===
|
||||
|
@ -823,8 +808,8 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
|
|||
}), annos
|
||||
}
|
||||
|
||||
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
// compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime.
|
||||
func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool) (Vector, annotations.Annotations) {
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
|
@ -837,7 +822,7 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
|
|||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
maxVal := s.Floats[0].F
|
||||
for _, f := range s.Floats {
|
||||
if f.F > maxVal || math.IsNaN(maxVal) {
|
||||
if compareFn(f.F, maxVal) {
|
||||
maxVal = f.F
|
||||
}
|
||||
}
|
||||
|
@ -845,26 +830,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
|
|||
}), annos
|
||||
}
|
||||
|
||||
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
|
||||
return (cur > maxVal) || math.IsNaN(maxVal)
|
||||
})
|
||||
}
|
||||
|
||||
// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
return enh.Out, nil
|
||||
}
|
||||
if len(samples.Histograms) > 0 {
|
||||
metricName := samples.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
minVal := s.Floats[0].F
|
||||
for _, f := range s.Floats {
|
||||
if f.F < minVal || math.IsNaN(minVal) {
|
||||
minVal = f.F
|
||||
}
|
||||
}
|
||||
return minVal
|
||||
}), annos
|
||||
return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool {
|
||||
return (cur < maxVal) || math.IsNaN(maxVal)
|
||||
})
|
||||
}
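The comparator closures keep the math.IsNaN(maxVal) clause because every ordered comparison involving NaN is false; without it, a NaN first sample would never be displaced by a real value. A short demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(3.0 > nan, nan > 3.0) // false false: NaN never orders

	// A max_over_time-style scan over samples that start with NaN.
	samples := []float64{nan, 2, 5}

	naive := samples[0]
	for _, f := range samples {
		if f > naive { // stays NaN forever
			naive = f
		}
	}

	fixed := samples[0]
	for _, f := range samples {
		if f > fixed || math.IsNaN(fixed) { // recovers once a real value arrives
			fixed = f
		}
	}
	fmt.Println(naive, fixed) // NaN 5
}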
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
|
@ -997,7 +974,7 @@ func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNod
|
|||
}), nil
|
||||
}
|
||||
|
||||
func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
|
||||
func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
|
||||
for _, el := range vals[0].(Vector) {
|
||||
if el.H == nil { // Process only float samples.
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
|
@ -1015,114 +992,114 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6
|
|||
|
||||
// === abs(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Abs), nil
|
||||
return simpleFloatFunc(vals, enh, math.Abs), nil
|
||||
}
|
||||
|
||||
// === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Ceil), nil
|
||||
return simpleFloatFunc(vals, enh, math.Ceil), nil
|
||||
}
|
||||
|
||||
// === floor(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Floor), nil
|
||||
return simpleFloatFunc(vals, enh, math.Floor), nil
|
||||
}
|
||||
|
||||
// === exp(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Exp), nil
|
||||
return simpleFloatFunc(vals, enh, math.Exp), nil
|
||||
}
|
||||
|
||||
// === sqrt(Vector VectorNode) (Vector, Annotations) ===
|
||||
func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Sqrt), nil
|
||||
return simpleFloatFunc(vals, enh, math.Sqrt), nil
|
||||
}
|
||||
|
||||
// === ln(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Log), nil
|
||||
return simpleFloatFunc(vals, enh, math.Log), nil
|
||||
}
|
||||
|
||||
// === log2(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Log2), nil
|
||||
return simpleFloatFunc(vals, enh, math.Log2), nil
|
||||
}
|
||||
|
||||
// === log10(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Log10), nil
|
||||
return simpleFloatFunc(vals, enh, math.Log10), nil
|
||||
}
|
||||
|
||||
// === sin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Sin), nil
|
||||
return simpleFloatFunc(vals, enh, math.Sin), nil
|
||||
}
|
||||
|
||||
// === cos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Cos), nil
|
||||
return simpleFloatFunc(vals, enh, math.Cos), nil
|
||||
}
|
||||
|
||||
// === tan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Tan), nil
|
||||
return simpleFloatFunc(vals, enh, math.Tan), nil
|
||||
}
|
||||
|
||||
// === asin(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Asin), nil
|
||||
return simpleFloatFunc(vals, enh, math.Asin), nil
|
||||
}
|
||||
|
||||
// === acos(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Acos), nil
|
||||
return simpleFloatFunc(vals, enh, math.Acos), nil
|
||||
}
|
||||
|
||||
// === atan(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Atan), nil
|
||||
return simpleFloatFunc(vals, enh, math.Atan), nil
|
||||
}
|
||||
|
||||
// === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Sinh), nil
|
||||
return simpleFloatFunc(vals, enh, math.Sinh), nil
|
||||
}
|
||||
|
||||
// === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Cosh), nil
|
||||
return simpleFloatFunc(vals, enh, math.Cosh), nil
|
||||
}
|
||||
|
||||
// === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Tanh), nil
|
||||
return simpleFloatFunc(vals, enh, math.Tanh), nil
|
||||
}
|
||||
|
||||
// === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Asinh), nil
|
||||
return simpleFloatFunc(vals, enh, math.Asinh), nil
|
||||
}
|
||||
|
||||
// === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Acosh), nil
|
||||
return simpleFloatFunc(vals, enh, math.Acosh), nil
|
||||
}
|
||||
|
||||
// === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, math.Atanh), nil
|
||||
return simpleFloatFunc(vals, enh, math.Atanh), nil
|
||||
}
|
||||
|
||||
// === rad(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, func(v float64) float64 {
|
||||
return simpleFloatFunc(vals, enh, func(v float64) float64 {
|
||||
return v * math.Pi / 180
|
||||
}), nil
|
||||
}
|
||||
|
||||
// === deg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, func(v float64) float64 {
|
||||
return simpleFloatFunc(vals, enh, func(v float64) float64 {
|
||||
return v * 180 / math.Pi
|
||||
}), nil
|
||||
}
|
||||
|
@ -1134,7 +1111,7 @@ func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector,
|
|||
|
||||
// === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
return simpleFunc(vals, enh, func(v float64) float64 {
|
||||
return simpleFloatFunc(vals, enh, func(v float64) float64 {
|
||||
switch {
|
||||
case v < 0:
|
||||
return -1
|
||||
|
@ -1271,79 +1248,48 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo
|
|||
return append(enh.Out, Sample{F: slope*duration + intercept}), nil
|
||||
}
|
||||
|
||||
func simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector {
|
||||
for _, el := range vals[0].(Vector) {
|
||||
if el.H != nil { // Process only histogram samples.
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
el.Metric = el.Metric.DropMetricName()
|
||||
}
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: el.Metric,
|
||||
F: f(el.H),
|
||||
DropName: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
return enh.Out
|
||||
}
|
||||
|
||||
// === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
inVec := vals[0].(Vector)
|
||||
|
||||
for _, sample := range inVec {
|
||||
// Skip non-histogram samples.
|
||||
if sample.H == nil {
|
||||
continue
|
||||
}
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
sample.Metric = sample.Metric.DropMetricName()
|
||||
}
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: sample.Metric,
|
||||
F: sample.H.Count,
|
||||
DropName: true,
|
||||
})
|
||||
}
|
||||
return enh.Out, nil
|
||||
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
|
||||
return h.Count
|
||||
}), nil
|
||||
}
|
||||
|
||||
// === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
inVec := vals[0].(Vector)
|
||||
|
||||
for _, sample := range inVec {
|
||||
// Skip non-histogram samples.
|
||||
if sample.H == nil {
|
||||
continue
|
||||
}
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
sample.Metric = sample.Metric.DropMetricName()
|
||||
}
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: sample.Metric,
|
||||
F: sample.H.Sum,
|
||||
DropName: true,
|
||||
})
|
||||
}
|
||||
return enh.Out, nil
|
||||
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
|
||||
return h.Sum
|
||||
}), nil
|
||||
}
|
||||
|
||||
// === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
inVec := vals[0].(Vector)
|
||||
|
||||
for _, sample := range inVec {
|
||||
// Skip non-histogram samples.
|
||||
if sample.H == nil {
|
||||
continue
|
||||
}
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
sample.Metric = sample.Metric.DropMetricName()
|
||||
}
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: sample.Metric,
|
||||
F: sample.H.Sum / sample.H.Count,
|
||||
DropName: true,
|
||||
})
|
||||
}
|
||||
return enh.Out, nil
|
||||
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
|
||||
return h.Sum / h.Count
|
||||
}), nil
|
||||
}
|
||||
|
||||
func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) {
|
||||
vec := vals[0].(Vector)
|
||||
for _, sample := range vec {
|
||||
// Skip non-histogram samples.
|
||||
if sample.H == nil {
|
||||
continue
|
||||
}
|
||||
mean := sample.H.Sum / sample.H.Count
|
||||
return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 {
|
||||
mean := h.Sum / h.Count
|
||||
var variance, cVariance float64
|
||||
it := sample.H.AllBucketIterator()
|
||||
it := h.AllBucketIterator()
|
||||
for it.Next() {
|
||||
bucket := it.At()
|
||||
if bucket.Count == 0 {
|
||||
|
@ -1351,7 +1297,7 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul
|
|||
}
|
||||
var val float64
|
||||
switch {
|
||||
case sample.H.UsesCustomBuckets():
|
||||
case h.UsesCustomBuckets():
|
||||
// Use arithmetic mean in case of custom buckets.
|
||||
val = (bucket.Upper + bucket.Lower) / 2.0
|
||||
case bucket.Lower <= 0 && bucket.Upper >= 0:
|
||||
|
@ -1368,20 +1314,12 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul
|
|||
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
|
||||
}
|
||||
variance += cVariance
|
||||
variance /= sample.H.Count
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
sample.Metric = sample.Metric.DropMetricName()
|
||||
}
|
||||
variance /= h.Count
|
||||
if varianceToResult != nil {
|
||||
variance = varianceToResult(variance)
|
||||
}
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: sample.Metric,
|
||||
F: variance,
|
||||
DropName: true,
|
||||
})
|
||||
}
|
||||
return enh.Out, nil
|
||||
return variance
|
||||
}), nil
|
||||
}
|
||||
|
||||
// === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
|
@ -1404,6 +1342,10 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
|
|||
|
||||
// Deal with the native histograms.
|
||||
for _, sample := range enh.nativeHistogramSamples {
|
||||
if sample.H == nil {
|
||||
// Native histogram conflicts with classic histogram at the same timestamp, ignore.
|
||||
continue
|
||||
}
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
sample.Metric = sample.Metric.DropMetricName()
|
||||
}
|
||||
|
@ -1446,6 +1388,10 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
|
|||
|
||||
// Deal with the native histograms.
|
||||
for _, sample := range enh.nativeHistogramSamples {
|
||||
if sample.H == nil {
|
||||
// Native histogram conflicts with classic histogram at the same timestamp, ignore.
|
||||
continue
|
||||
}
|
||||
if !enh.enableDelayedNameRemoval {
|
||||
sample.Metric = sample.Metric.DropMetricName()
|
||||
}
|
||||
|
|
|
@ -452,7 +452,7 @@ positive_duration_expr : duration_expr
|
|||
offset_expr: expr OFFSET duration_expr
|
||||
{
|
||||
if numLit, ok := $3.(*NumberLiteral); ok {
|
||||
yylex.(*parser).addOffset($1, time.Duration(numLit.Val*1000)*time.Millisecond)
|
||||
yylex.(*parser).addOffset($1, time.Duration(math.Round(numLit.Val*float64(time.Second))))
|
||||
$$ = $1
|
||||
break
|
||||
}
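Converting the literal via numLit.Val*1000 and then multiplying by time.Millisecond both truncates and drops sub-millisecond precision, so a literal like 1001ms could come out as 1s; rounding the value expressed in nanoseconds keeps it exact, which the new test[1000ms] and test[1001ms] parser cases near the end of this diff exercise. A small comparison of the two conversions:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	val := 1.001 // the duration literal "1001ms" expressed in seconds

	oldWay := time.Duration(val*1000) * time.Millisecond
	newWay := time.Duration(math.Round(val * float64(time.Second)))

	// 1.001*1000 is 1000.9999999999999 in float64, so the old conversion
	// truncates to 1s; rounding in nanoseconds keeps the extra millisecond.
	fmt.Println(oldWay, newWay) // 1s 1.001s
}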
@ -506,7 +506,7 @@ matrix_selector : expr LEFT_BRACKET positive_duration_expr RIGHT_BRACKET
|
|||
|
||||
var rangeNl time.Duration
|
||||
if numLit, ok := $3.(*NumberLiteral); ok {
|
||||
rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
|
||||
rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
|
||||
}
|
||||
rangeExpr, _ := $3.(*DurationExpr)
|
||||
$$ = &MatrixSelector{
|
||||
|
@ -523,11 +523,11 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati
|
|||
var rangeNl time.Duration
|
||||
var stepNl time.Duration
|
||||
if numLit, ok := $3.(*NumberLiteral); ok {
|
||||
rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
|
||||
rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
|
||||
}
|
||||
rangeExpr, _ := $3.(*DurationExpr)
|
||||
if numLit, ok := $5.(*NumberLiteral); ok {
|
||||
stepNl = time.Duration(numLit.Val*1000)*time.Millisecond
|
||||
stepNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
|
||||
}
|
||||
stepExpr, _ := $5.(*DurationExpr)
|
||||
$$ = &SubqueryExpr{
|
||||
|
@ -543,7 +543,7 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati
|
|||
{
|
||||
var rangeNl time.Duration
|
||||
if numLit, ok := $3.(*NumberLiteral); ok {
|
||||
rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond
|
||||
rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second)))
|
||||
}
|
||||
rangeExpr, _ := $3.(*DurationExpr)
|
||||
$$ = &SubqueryExpr{
|
||||
|
|
|
@@ -1372,7 +1372,7 @@ yydefault:
yyDollar = yyS[yypt-3 : yypt+1]
{
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
yylex.(*parser).addOffset(yyDollar[1].node, time.Duration(numLit.Val*1000)*time.Millisecond)
yylex.(*parser).addOffset(yyDollar[1].node, time.Duration(math.Round(numLit.Val*float64(time.Second))))
yyVAL.node = yyDollar[1].node
break
}

@@ -1423,7 +1423,7 @@ yydefault:

var rangeNl time.Duration
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
rangeExpr, _ := yyDollar[3].node.(*DurationExpr)
yyVAL.node = &MatrixSelector{

@@ -1439,11 +1439,11 @@ yydefault:
var rangeNl time.Duration
var stepNl time.Duration
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
rangeExpr, _ := yyDollar[3].node.(*DurationExpr)
if numLit, ok := yyDollar[5].node.(*NumberLiteral); ok {
stepNl = time.Duration(numLit.Val*1000) * time.Millisecond
stepNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
stepExpr, _ := yyDollar[5].node.(*DurationExpr)
yyVAL.node = &SubqueryExpr{

@@ -1460,7 +1460,7 @@ yydefault:
{
var rangeNl time.Duration
if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok {
rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond
rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second)))
}
rangeExpr, _ := yyDollar[3].node.(*DurationExpr)
yyVAL.node = &SubqueryExpr{
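The parser hunks above replace `time.Duration(numLit.Val*1000)*time.Millisecond` with `time.Duration(math.Round(numLit.Val*float64(time.Second)))` when a numeric literal is used as a duration. The older form converts seconds to milliseconds in floating point and then truncates, so a value such as `1.001` can silently lose its last millisecond; rounding once at nanosecond precision keeps `1001ms` intact. A minimal standalone sketch (not part of the diff) illustrating the difference:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// A PromQL duration literal like 1001ms is parsed as 1.001 (seconds).
	val := 1.001

	// Old conversion: seconds -> milliseconds in floating point, then truncation.
	// 1.001*1000 evaluates to 1000.9999999999999, which truncates to 1000ms.
	old := time.Duration(val*1000) * time.Millisecond

	// New conversion: round once at nanosecond precision, giving exactly 1001ms.
	fixed := time.Duration(math.Round(val * float64(time.Second)))

	fmt.Println(old, fixed) // prints: 1s 1.001s
}
```

The new `test[1000ms]`/`test[1001ms]`/`test[1002ms]` parser cases and the dense `sum_over_time` test data further down exercise exactly this boundary.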
@@ -674,10 +674,10 @@ func lexInsideBraces(l *Lexer) stateFn {
l.backup()
l.emit(EQL)
case r == '!':
switch nr := l.next(); {
case nr == '~':
switch nr := l.next(); nr {
case '~':
l.emit(NEQ_REGEX)
case nr == '=':
case '=':
l.emit(NEQ)
default:
return l.errorf("unexpected character after '!' inside braces: %q", nr)
@@ -2008,6 +2008,57 @@ var testExpr = []struct {
errMsg: `unexpected "}" in label matching, expected string`,
},
// Test matrix selector.
{
input: "test[1000ms]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 1000 * time.Millisecond,
EndPos: 12,
},
},
{
input: "test[1001ms]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 1001 * time.Millisecond,
EndPos: 12,
},
},
{
input: "test[1002ms]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 1002 * time.Millisecond,
EndPos: 12,
},
},
{
input: "test[5s]",
expected: &MatrixSelector{
@@ -111,7 +111,7 @@ eval range from <start> to <end> step <step> <query>
### `expect` Syntax

```
expect <type> <match_type> <string>
expect <type> <match_type>: <string>
```

#### Parameters

@@ -139,8 +139,8 @@ eval instant at 1m sum by (env) (my_metric)
{env="test"} 20

eval range from 0 to 3m step 1m sum by (env) (my_metric)
expect warn msg something went wrong
expect info regex something went (wrong|boom)
expect warn msg: something went wrong
expect info regex: something went (wrong|boom)
{env="prod"} 2 5 10 20
{env="test"} 10 20 30 45

@@ -148,10 +148,10 @@ eval instant at 1m ceil({__name__=~'testmetric1|testmetric2'})
expect fail

eval instant at 1m ceil({__name__=~'testmetric1|testmetric2'})
expect fail msg "vector cannot contain metrics with the same labelset"
expect fail msg: "vector cannot contain metrics with the same labelset"

eval instant at 1m ceil({__name__=~'testmetric1|testmetric2'})
expect fail regex "vector cannot contain metrics .*|something else went wrong"
expect fail regex: "vector cannot contain metrics .*|something else went wrong"

eval instant at 1m sum by (env) (my_metric)
expect ordered
@@ -271,7 +271,7 @@ func parseExpect(defLine string) (expectCmdType, expectCmd, error) {
expectParts := patExpect.FindStringSubmatch(strings.TrimSpace(defLine))
expCmd := expectCmd{}
if expectParts == nil {
return 0, expCmd, errors.New("invalid expect statement, must match `expect <type> <match_type> <string>` format")
return 0, expCmd, errors.New("invalid expect statement, must match `expect <type> <match_type>: <string>` format")
}
var (
mode = expectParts[1]
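The `parseExpect` hunk above only changes the error message to the new colon-separated form; the actual `patExpect` pattern is not shown in this diff. Purely as an illustration of the documented `expect <type> <match_type>: <string>` shape, a pattern along these lines would accept it (the regex and variable name below are hypothetical, not Prometheus code):

```go
package main

import (
	"fmt"
	"regexp"
)

// patExpectSketch is a hypothetical stand-in for the real pattern used by parseExpect.
// It captures the expectation type, an optional match type, and the string after the colon.
var patExpectSketch = regexp.MustCompile(`^expect\s+(\w+)(?:\s+(\w+):\s*(.+))?$`)

func main() {
	for _, line := range []string{
		"expect no_info",
		"expect warn msg: something went wrong",
		"expect fail regex: vector cannot contain metrics .*",
	} {
		m := patExpectSketch.FindStringSubmatch(line)
		fmt.Printf("%q -> type=%q match_type=%q string=%q\n", line, m[1], m[2], m[3])
	}
}
```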
@@ -699,7 +699,7 @@ load 5m
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
expect fail error: something went wrong
`,
expectedError: "error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid expect statement, must match `expect <type> <match_type> <string>` format",
expectedError: "error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid expect statement, must match `expect <type> <match_type>: <string>` format",
},
"instant query expected not to care about annotations (with new eval syntax)": {
input: `
@@ -274,7 +274,7 @@ load 5m
http_requests{job="app-server", instance="1", group="canary"} 0+80x10
http_requests_histogram{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:10}}x11
http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11
foo 3+0x10
foo 1+1x9 3

eval_ordered instant at 50m topk(3, http_requests)
http_requests{group="canary", instance="1", job="app-server"} 800

@@ -340,6 +340,13 @@ eval_ordered instant at 50m topk(scalar(foo), http_requests)
http_requests{group="canary", instance="0", job="app-server"} 700
http_requests{group="production", instance="1", job="app-server"} 600

# Bug #15971.
eval range from 0m to 50m step 5m count(topk(scalar(foo), http_requests))
{} 1 2 3 4 5 6 7 8 9 9 3

eval range from 0m to 50m step 5m count(bottomk(scalar(foo), http_requests))
{} 1 2 3 4 5 6 7 8 9 9 3

# Tests for histogram: should ignore histograms.
eval_info instant at 50m topk(100, http_requests_histogram)
#empty

@@ -447,7 +454,7 @@ load 10s
data{test="uneven samples",point="b"} 1
data{test="uneven samples",point="c"} 4
data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}
foo .8
foo 0 1 0 1 0 1 0.8

eval instant at 1m quantile without(point)(0.8, data)
{test="two samples"} 0.8

@@ -475,11 +482,18 @@ eval instant at 1m quantile without(point)((scalar(foo)), data)
{test="three samples"} 1.6
{test="uneven samples"} 2.8

eval_warn instant at 1m quantile without(point)(NaN, data)
eval instant at 1m quantile without(point)(NaN, data)
expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN
{test="two samples"} NaN
{test="three samples"} NaN
{test="uneven samples"} NaN

# Bug #15971.
eval range from 0m to 1m step 10s quantile without(point) (scalar(foo), data)
{test="two samples"} 0 1 0 1 0 1 0.8
{test="three samples"} 0 2 0 2 0 2 1.6
{test="uneven samples"} 0 4 0 4 0 4 2.8

# Tests for group.
clear
@@ -1019,6 +1019,47 @@ eval instant at 1m sum_over_time(metric[2m])
eval instant at 1m avg_over_time(metric[2m])
{} 0.5

# Test per-series aggregation on dense samples.
clear
load 1ms
metric 1+0x4000

eval instant at 4s sum_over_time(metric[1000ms])
{} 1000

eval instant at 4s sum_over_time(metric[1001ms])
{} 1001

eval instant at 4s sum_over_time(metric[1002ms])
{} 1002

eval instant at 4s sum_over_time(metric[1003ms])
{} 1003

eval instant at 4s sum_over_time(metric[2000ms])
{} 2000

eval instant at 4s sum_over_time(metric[2001ms])
{} 2001

eval instant at 4s sum_over_time(metric[2002ms])
{} 2002

eval instant at 4s sum_over_time(metric[2003ms])
{} 2003

eval instant at 4s sum_over_time(metric[3000ms])
{} 3000

eval instant at 4s sum_over_time(metric[3001ms])
{} 3001

eval instant at 4s sum_over_time(metric[3002ms])
{} 3002

eval instant at 4s sum_over_time(metric[3003ms])
{} 3003

# Tests for stddev_over_time and stdvar_over_time.
clear
load 10s
@@ -584,3 +584,25 @@ eval instant at 10m histogram_count(increase(histogram_with_reset[15m]))

eval instant at 10m histogram_sum(increase(histogram_with_reset[15m]))
{} 91.5

clear

# Test histogram_quantile and histogram_fraction with conflicting classic and native histograms.
load 1m
series{host="a"} {{schema:0 sum:5 count:4 buckets:[9 2 1]}}
series{host="a", le="0.1"} 2
series{host="a", le="1"} 3
series{host="a", le="10"} 5
series{host="a", le="100"} 6
series{host="a", le="1000"} 8
series{host="a", le="+Inf"} 9

eval instant at 0 histogram_quantile(0.8, series)
expect no_info
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
# Should return no results.

eval instant at 0 histogram_fraction(-Inf, 1, series)
expect no_info
expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series"
# Should return no results.
@@ -11,6 +11,8 @@ load 5m
http_requests{job="api-server", instance="3", group="canary"} 0+60x10
http_requests{job="api-server", instance="histogram_1", group="canary"} {{schema:0 sum:10 count:10}}x11
http_requests{job="api-server", instance="histogram_2", group="canary"} {{schema:0 sum:20 count:20}}x11
foo 1+1x10
bar 0 1 0 -1 0 1 0 -1 0 1 0

eval instant at 50m count(limitk by (group) (0, http_requests))
# empty

@@ -69,6 +71,10 @@ eval instant at 50m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"
eval range from 0 to 50m step 5m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"}))
{} 2+0x10

# Bug #15971.
eval range from 0m to 50m step 5m count(limitk(scalar(foo), http_requests))
{} 1 2 3 4 5 6 7 8 8 8 8

# limit_ratio
eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests))
# empty

@@ -105,11 +111,13 @@ eval range from 0 to 50m step 5m count(limit_ratio(-1.0, http_requests) and http
{} 8+0x10

# Capped to 1.0 -> all samples.
eval_warn range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests)
eval range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests)
expect warn msg: PromQL warning: ratio value should be between -1 and 1, got 1.1, capping to 1
{} 8+0x10

# Capped to -1.0 -> all samples.
eval_warn range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests)
eval range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests)
expect warn msg: PromQL warning: ratio value should be between -1 and 1, got -1.1, capping to -1
{} 8+0x10

# Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other.

@@ -137,12 +145,12 @@ eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) or limit_
eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit_ratio(-0.2, http_requests))
# empty

# Complement below for [some_ratio, 1.0 - some_ratio], some_ratio derived from time(),
# Complement below for [some_ratio, - (1.0 - some_ratio)], some_ratio derived from time(),
# using a small prime number to avoid rounded ratio values, and a small set of them.
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio(1.0 - (time() % 17/17), http_requests))
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio( - (1.0 - (time() % 17/17)), http_requests))
{} 8+0x10

eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio(1.0 - (time() % 17/17), http_requests))
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio( - (1.0 - (time() % 17/17)), http_requests))
# empty

# Poor man's normality check: ok (loaded samples follow a nice linearity over labels and time).

@@ -156,3 +164,7 @@ eval instant at 50m limit_ratio(1, http_requests{instance="histogram_1"})

eval range from 0 to 50m step 5m limit_ratio(1, http_requests{instance="histogram_1"})
{__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}}x10

# Bug #15971.
eval range from 0m to 50m step 5m count(limit_ratio(scalar(bar), http_requests))
{} _ 8 _ 8 _ 8 _ 8 _ 8 _
@@ -14,6 +14,7 @@
package promql

import (
"context"
"encoding/json"
"errors"
"fmt"

@@ -533,3 +534,68 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
func (ssi *storageSeriesIterator) Err() error {
return nil
}

type fParams struct {
series Series
constValue float64
isConstant bool
minValue float64
maxValue float64
hasAnyNaN bool
}

// newFParams evaluates the expression and returns an fParams object,
// which holds the parameter values (constant or series) along with min, max, and NaN info.
func newFParams(ctx context.Context, ev *evaluator, expr parser.Expr) (*fParams, annotations.Annotations) {
if expr == nil {
return &fParams{}, nil
}
var constParam bool
if _, ok := expr.(*parser.NumberLiteral); ok {
constParam = true
}
val, ws := ev.eval(ctx, expr)
mat, ok := val.(Matrix)
if !ok || len(mat) == 0 {
return &fParams{}, ws
}
fp := &fParams{
series: mat[0],
isConstant: constParam,
minValue: math.MaxFloat64,
maxValue: -math.MaxFloat64,
}

if constParam {
fp.constValue = fp.series.Floats[0].F
fp.minValue, fp.maxValue = fp.constValue, fp.constValue
fp.hasAnyNaN = math.IsNaN(fp.constValue)
return fp, ws
}

for _, v := range fp.series.Floats {
fp.maxValue = math.Max(fp.maxValue, v.F)
fp.minValue = math.Min(fp.minValue, v.F)
if math.IsNaN(v.F) {
fp.hasAnyNaN = true
}
}
return fp, ws
}

func (fp *fParams) Max() float64 { return fp.maxValue }
func (fp *fParams) Min() float64 { return fp.minValue }
func (fp *fParams) HasAnyNaN() bool { return fp.hasAnyNaN }

// Next returns the next value from the series or the constant value, and advances the series if applicable.
func (fp *fParams) Next() float64 {
if fp.isConstant {
return fp.constValue
}
if len(fp.series.Floats) > 0 {
val := fp.series.Floats[0].F
fp.series.Floats = fp.series.Floats[1:]
return val
}
return 0
}
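The new `fParams` helper above lets a function parameter that is itself an expression (for example `scalar(foo)` passed to `topk`, `bottomk`, `limitk`, `limit_ratio`, or `quantile`) be consumed one value per evaluation step, with `Min`, `Max`, and `HasAnyNaN` available for range-wide validation; the "Bug #15971" test cases added in this diff exercise exactly that. A rough standalone sketch of the consumption pattern, with simplified types rather than the actual evaluator code:

```go
package main

import (
	"fmt"
	"math"
)

// stepParam mimics the fParams idea: either a constant parameter
// or one value per evaluation step of a range query.
type stepParam struct {
	constValue float64
	isConstant bool
	values     []float64
}

// next returns the parameter for the current step and advances,
// mirroring what fParams.Next does in the diff above.
func (p *stepParam) next() float64 {
	if p.isConstant {
		return p.constValue
	}
	if len(p.values) > 0 {
		v := p.values[0]
		p.values = p.values[1:]
		return v
	}
	return 0
}

func main() {
	// A non-constant parameter such as scalar(foo) yields a (possibly NaN) value per step.
	k := &stepParam{values: []float64{1, 2, 3, math.NaN()}}
	for step := 0; step < 4; step++ {
		fmt.Printf("step %d: k=%v\n", step, k.next())
	}
}
```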
@@ -20,6 +20,7 @@ import (
"math"
"os"
"path"
"slices"
"sort"
"strconv"
"sync"

@@ -180,7 +181,7 @@ func TestAlertingRule(t *testing.T) {
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime)
}
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))

sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0

@@ -188,7 +189,7 @@ func TestAlertingRule(t *testing.T) {
prom_testutil.RequireEqual(t, test.result, filteredRes)

for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}

@@ -333,7 +334,7 @@ func TestForStateAddSamples(t *testing.T) {
test.result[i].F = forState
}
}
require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))

sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0

@@ -341,7 +342,7 @@ func TestForStateAddSamples(t *testing.T) {
prom_testutil.RequireEqual(t, test.result, filteredRes)

for _, aa := range rule.ActiveAlerts() {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
})

@@ -489,7 +490,7 @@ func TestForStateRestore(t *testing.T) {

got := newRule.ActiveAlerts()
for _, aa := range got {
require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0

@@ -513,7 +514,7 @@ func TestForStateRestore(t *testing.T) {
}
default:
exp := tt.expectedAlerts
require.Equal(t, len(exp), len(got))
require.Len(t, got, len(exp))
sortAlerts(exp)
sortAlerts(got)
for i, e := range exp {

@@ -1008,11 +1009,8 @@ func TestMetricsUpdate(t *testing.T) {
var metrics int
for _, m := range ms {
s := m.GetName()
for _, n := range metricNames {
if s == n {
metrics += len(m.Metric)
break
}
if slices.Contains(metricNames, s) {
metrics += len(m.Metric)
}
}
return metrics

@@ -2442,7 +2440,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
wg.Wait()

// Synchronous queries also count towards inflight, so at most we can have maxConcurrency+$groupCount inflight evaluations.
require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
require.Equal(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
}

func TestUpdateWhenStopped(t *testing.T) {
@@ -366,7 +366,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
)

@@ -522,7 +522,7 @@ func (sp *scrapePool) sync(targets []*Target) {
trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
mrc = sp.config.MetricRelabelConfigs
fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms
alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled()
convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled()
)
@@ -1157,7 +1157,7 @@ func TestScrapeLoopRun(t *testing.T) {
case <-time.After(5 * time.Second):
require.FailNow(t, "Cancellation during initial offset failed.")
case err := <-errc:
require.FailNow(t, "Unexpected error: %s", err)
require.FailNow(t, "Unexpected error", "err: %s", err)
}

// The provided timeout must cause cancellation of the context passed down to the

@@ -1200,7 +1200,7 @@ func TestScrapeLoopRun(t *testing.T) {
case <-signal:
// Loop terminated as expected.
case err := <-errc:
require.FailNow(t, "Unexpected error: %s", err)
require.FailNow(t, "Unexpected error", "err: %s", err)
case <-time.After(3 * time.Second):
require.FailNow(t, "Loop did not terminate on context cancellation")
}

@@ -1309,14 +1309,14 @@ test_metric_total 1
md, ok = cache.GetMetadata("test_metric_no_help")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
require.Equal(t, "", md.Help)
require.Equal(t, "", md.Unit)
require.Empty(t, md.Help)
require.Empty(t, md.Unit)

md, ok = cache.GetMetadata("test_metric_no_type")
require.True(t, ok, "expected metadata to be present")
require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
require.Equal(t, "other help text", md.Help)
require.Equal(t, "", md.Unit)
require.Empty(t, md.Unit)
}

func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {

@@ -1567,7 +1567,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
if numScrapes == cue {
action(sl)
}
w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)))
fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
return nil
}
sl.run(nil)

@@ -4259,7 +4259,7 @@ test_summary_count 199
foundLeValues[v] = true
}

require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected")
require.Len(t, foundLeValues, len(expectedValues), "number of label values not as expected")
for _, v := range expectedValues {
require.Contains(t, foundLeValues, v, "label value not found")
}

@@ -4568,7 +4568,7 @@ metric: <
foundLeValues[v] = true
}

require.Equal(t, len(expectedValues), len(foundLeValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
require.Len(t, foundLeValues, len(expectedValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
for _, v := range expectedValues {
require.Contains(t, foundLeValues, v, "label value not found")
}

@@ -4635,26 +4635,26 @@ metric: <
fals := false
for metricsTextName, metricsText := range metricsTexts {
for name, tc := range map[string]struct {
alwaysScrapeClassicHistograms bool
alwaysScrapeClassicHistograms *bool
convertClassicHistToNHCB *bool
}{
"convert with scrape": {
alwaysScrapeClassicHistograms: true,
alwaysScrapeClassicHistograms: &tru,
convertClassicHistToNHCB: &tru,
},
"convert without scrape": {
alwaysScrapeClassicHistograms: false,
alwaysScrapeClassicHistograms: &fals,
convertClassicHistToNHCB: &tru,
},
"scrape without convert": {
alwaysScrapeClassicHistograms: true,
alwaysScrapeClassicHistograms: &tru,
convertClassicHistToNHCB: &fals,
},
"scrape with nil convert": {
alwaysScrapeClassicHistograms: true,
alwaysScrapeClassicHistograms: &tru,
},
"neither scrape nor convert": {
alwaysScrapeClassicHistograms: false,
alwaysScrapeClassicHistograms: &fals,
convertClassicHistToNHCB: &fals,
},
} {

@@ -4664,7 +4664,7 @@ metric: <
expectedNativeHistCount = 1
expectCustomBuckets = false
expectedClassicHistCount = 0
if metricsText.hasClassic && tc.alwaysScrapeClassicHistograms {
if metricsText.hasClassic && tc.alwaysScrapeClassicHistograms != nil && *tc.alwaysScrapeClassicHistograms {
expectedClassicHistCount = 1
}
} else if metricsText.hasClassic {

@@ -4672,11 +4672,11 @@ metric: <
case tc.convertClassicHistToNHCB == nil || !*tc.convertClassicHistToNHCB:
expectedClassicHistCount = 1
expectedNativeHistCount = 0
case tc.alwaysScrapeClassicHistograms && *tc.convertClassicHistToNHCB:
case tc.alwaysScrapeClassicHistograms != nil && *tc.alwaysScrapeClassicHistograms && *tc.convertClassicHistToNHCB:
expectedClassicHistCount = 1
expectedNativeHistCount = 1
expectCustomBuckets = true
case !tc.alwaysScrapeClassicHistograms && *tc.convertClassicHistToNHCB:
case (tc.alwaysScrapeClassicHistograms == nil || !*tc.alwaysScrapeClassicHistograms) && *tc.convertClassicHistToNHCB:
expectedClassicHistCount = 0
expectedNativeHistCount = 1
expectCustomBuckets = true

@@ -4817,7 +4817,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *

switch numScrapes {
case 1:
w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))))
fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))
return nil
case 5:
cancel()

@@ -4867,7 +4867,7 @@ func TestScrapeLoopCompression(t *testing.T) {

ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header")
fmt.Fprint(w, metricsText)
fmt.Fprint(w, string(metricsText))
close(scraped)
}))
defer ts.Close()

@@ -5164,7 +5164,7 @@ scrape_configs:

s := teststorage.New(t)
defer s.Close()
s.DB.EnableNativeHistograms()
s.EnableNativeHistograms()
reg := prometheus.NewRegistry()

mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg)
@@ -144,7 +144,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) {
func (t *Target) hash() uint64 {
h := fnv.New64a()

h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
fmt.Fprintf(h, "%016d", t.labels.Hash())
h.Write([]byte(t.URL().String()))

return h.Sum64()
@@ -36,4 +36,4 @@ jobs:
uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0
with:
args: --verbose
version: v2.0.2
version: v2.1.5
@@ -10,7 +10,7 @@ git_user="prombot"
branch="repo_sync"
commit_msg="Update common Prometheus files"
pr_title="Synchronize common files from prometheus/prometheus"
pr_msg="Propagating changes from prometheus/prometheus default branch."
pr_msg="Propagating changes from prometheus/prometheus default branch.\n\n*Source can be found [here](https://github.com/prometheus/prometheus/blob/main/scripts/sync_repo_files.sh).*"
orgs="prometheus prometheus-community"

color_red='\e[31m'
@@ -64,10 +64,8 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
queriers = append(queriers, newSecondaryQuerierFrom(q))
}

concurrentSelect := false
if len(secondaries) > 0 {
concurrentSelect = true
}
concurrentSelect := len(secondaries) > 0

return &querierAdapter{&mergeGenericQuerier{
mergeFn: (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge,
queriers: queriers,

@@ -111,10 +109,8 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
queriers = append(queriers, newSecondaryQuerierFromChunk(q))
}

concurrentSelect := false
if len(secondaries) > 0 {
concurrentSelect = true
}
concurrentSelect := len(secondaries) > 0

return &chunkQuerierAdapter{&mergeGenericQuerier{
mergeFn: (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge,
queriers: queriers,
@@ -210,7 +210,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained
if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
if !settings.AllowUTF8 && (len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__") {
name = otlptranslator.NormalizeLabel(name)
}
l[name] = extras[i+1]
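The hunk above rewrites the double-underscore guard from a negated conjunction into the equivalent disjunction (De Morgan's law), so the condition reads directly as "normalize unless the name is an internal `__...__` label longer than four characters" and avoids the negated compound expression some linters flag. A quick standalone check of the equivalence of the two forms of the name predicate (illustrative only, names chosen arbitrarily and all at least two characters long so the slicing is safe):

```go
package main

import "fmt"

func main() {
	names := []string{"__name__", "__meta_kubernetes_pod__", "____", "job", "le", "__x"}
	for _, name := range names {
		// Old form: negate "is a wrapped internal label longer than 4 chars".
		oldForm := !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__")
		// New form: the same predicate written as a disjunction.
		newForm := len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__"
		fmt.Printf("%-28q internal=%v, forms agree: %v\n", name, !newForm, oldForm == newForm)
	}
}
```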
@@ -102,8 +102,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
// Cumulative temporality is always valid.
// Delta temporality is also valid if AllowDeltaTemporality is true.
// All other temporality values are invalid.
!(temporality == pmetric.AggregationTemporalityCumulative ||
(settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
(temporality != pmetric.AggregationTemporalityCumulative &&
(!settings.AllowDeltaTemporality || temporality != pmetric.AggregationTemporalityDelta)) {
errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
continue
}
@@ -515,10 +515,8 @@ func NewQueueManager(
compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use.
}

walMetadata := false
if t.protoMsg != config.RemoteWriteProtoMsgV1 {
walMetadata = true
}
walMetadata := t.protoMsg != config.RemoteWriteProtoMsgV1

t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)

// The current MetadataWatcher implementation is mutually exclusive
@@ -93,7 +93,7 @@ func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.
noop bool
err error
)
cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt)
cq.maxt, noop, err = c.preferLocalStorage(mint, maxt)
if err != nil {
return nil, err
}
@@ -277,7 +277,7 @@ func TestStreamReadEndpoint(t *testing.T) {
require.Equal(t, 2, recorder.Code/100)

require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))
require.Empty(t, recorder.Result().Header.Get("Content-Encoding"))

var results []*prompb.ChunkedReadResponse
stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil)
@@ -112,7 +112,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
require.Len(t, ssSlice, 2)
var iter chunkenc.Iterator
for i, s := range ssSlice {
require.EqualValues(t, series[i].lbs, s.Labels())
require.Equal(t, series[i].lbs, s.Labels())
iter = s.Iterator(iter)
j := 0
for iter.Next() == chunkenc.ValFloat {

@@ -597,15 +597,15 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
}
series := NewListSeries(lbs, copiedSamples)
encoder := NewSeriesToChunkEncoder(series)
require.EqualValues(t, lbs, encoder.Labels())
require.Equal(t, lbs, encoder.Labels())

chks, err := ExpandChunks(encoder.Iterator(nil))
require.NoError(t, err)
require.Equal(t, len(test.expectedCounterResetHeaders), len(chks))
require.Len(t, chks, len(test.expectedCounterResetHeaders))

// Decode all encoded samples and assert they are equal to the original ones.
encodedSamples := chunks.ChunkMetasToSamples(chks)
require.Equal(t, len(test.expectedSamples), len(encodedSamples))
require.Len(t, encodedSamples, len(test.expectedSamples))

for i, s := range test.expectedSamples {
encodedSample := encodedSamples[i]
@@ -1305,7 +1305,7 @@ func TestDBCreatedTimestampSamplesIngestion(t *testing.T) {

outputSamples := readWALSamples(t, s.wal.Dir())

require.Equal(t, len(tc.expectedSamples), len(outputSamples), "Expected %d samples", len(tc.expectedSamples))
require.Len(t, outputSamples, len(tc.expectedSamples), "Expected %d samples", len(tc.expectedSamples))

for i, expectedSample := range tc.expectedSamples {
for _, sample := range outputSamples {
@@ -77,13 +77,13 @@ func TestNoDeadlock(t *testing.T) {

func labelsWithHashCollision() (labels.Labels, labels.Labels) {
// These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions
ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y")
ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF")
ls1 := labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl")
ls2 := labels.FromStrings("__name__", "metric", "lbl", "RqcXatm")

if ls1.Hash() != ls2.Hash() {
// These ones are the same when using -tags stringlabels
ls1 = labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl")
ls2 = labels.FromStrings("__name__", "metric", "lbl", "RqcXatm")
// These ones are the same when using -tags slicelabels
ls1 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y")
ls2 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF")
}

if ls1.Hash() != ls2.Hash() {
@@ -66,10 +66,10 @@ type IndexReader interface {
Symbols() index.StringIter

// SortedLabelValues returns sorted possible label values.
SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error)
SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error)

// LabelValues returns possible label values which may not be sorted.
LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error)
LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error)

// Postings returns the postings list iterator for the label pairs.
// The Postings here contain the offsets to the series inside the index.

@@ -475,14 +475,14 @@ func (r blockIndexReader) Symbols() index.StringIter {
return r.ir.Symbols()
}

func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
var st []string
var err error

if len(matchers) == 0 {
st, err = r.ir.SortedLabelValues(ctx, name)
st, err = r.ir.SortedLabelValues(ctx, name, hints)
} else {
st, err = r.LabelValues(ctx, name, matchers...)
st, err = r.LabelValues(ctx, name, hints, matchers...)
if err == nil {
slices.Sort(st)
}

@@ -493,16 +493,16 @@ func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, ma
return st, nil
}

func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) {
func (r blockIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) {
if len(matchers) == 0 {
st, err := r.ir.LabelValues(ctx, name)
st, err := r.ir.LabelValues(ctx, name, hints)
if err != nil {
return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err)
}
return st, nil
}

return labelValuesWithMatchers(ctx, r.ir, name, matchers...)
return labelValuesWithMatchers(ctx, r.ir, name, hints, matchers...)
}

func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) {
@@ -299,11 +299,11 @@ func TestLabelValuesWithMatchers(t *testing.T) {

for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
actualValues, err := indexReader.SortedLabelValues(ctx, tt.labelName, tt.matchers...)
actualValues, err := indexReader.SortedLabelValues(ctx, tt.labelName, nil, tt.matchers...)
require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues)

actualValues, err = indexReader.LabelValues(ctx, tt.labelName, tt.matchers...)
actualValues, err = indexReader.LabelValues(ctx, tt.labelName, nil, tt.matchers...)
sort.Strings(actualValues)
require.NoError(t, err)
require.Equal(t, tt.expectedValues, actualValues)

@@ -459,7 +459,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
b.ReportAllocs()

for benchIdx := 0; benchIdx < b.N; benchIdx++ {
actualValues, err := indexReader.LabelValues(ctx, "b_tens", matchers...)
actualValues, err := indexReader.LabelValues(ctx, "b_tens", nil, matchers...)
require.NoError(b, err)
require.Len(b, actualValues, 9)
}
@@ -129,7 +129,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {

// Checking on-disk bytes for the first file.
require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
require.Len(t, hrw.closers, len(hrw.mmappedChunkFiles))

actualBytes, err := os.ReadFile(firstFileName)
require.NoError(t, err)

@@ -208,9 +208,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {

files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
require.Len(t, files, len(remainingFiles), "files on disk")
require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
require.Len(t, hrw.closers, len(remainingFiles), "closers")

for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]

@@ -325,9 +325,9 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {

files, err := os.ReadDir(hrw.dir.Name())
require.NoError(t, err)
require.Equal(t, len(remainingFiles), len(files), "files on disk")
require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
require.Len(t, files, len(remainingFiles), "files on disk")
require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
require.Len(t, hrw.closers, len(remainingFiles), "closers")

for _, i := range remainingFiles {
_, ok := hrw.mmappedChunkFiles[i]
@@ -1399,7 +1399,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
}
require.NoError(t, db.reload())
require.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks")
require.Len(t, db.Blocks(), len(blocks), "unexpected block count after a reloadBlocks")

return len(blocks)
},
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue