commit 36ec1e4fb0
Merge branch 'main' into merge-3.5-into-main

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
@@ -19,7 +19,7 @@ jobs:
     steps:
       - name: Dependabot metadata
         id: metadata
-        uses: dependabot/fetch-metadata@d7267f607e9d3fb96fc2fbe83e0af444713e90b7 # v2.3.0
+        uses: dependabot/fetch-metadata@08eff52bf64351f401fb50d4972fa95b9f2c2d1b # v2.4.0
         with:
           github-token: "${{ secrets.GITHUB_TOKEN }}"
       - name: Enable auto-merge for Dependabot PRs
@@ -95,7 +95,7 @@ jobs:
       - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
        with:
          go-version: 1.24.x
      - run: |
@@ -205,7 +205,7 @@ jobs:
        with:
          persist-credentials: false
      - name: Install Go
-       uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+       uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
        with:
          cache: false
          go-version: 1.24.x
@@ -220,18 +220,18 @@ jobs:
        with:
          persist-credentials: false
      - name: Install Go
-       uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+       uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
        with:
          go-version: 1.24.x
      - name: Install snmp_exporter/generator dependencies
        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
        if: github.repository == 'prometheus/snmp_exporter'
      - name: Lint
-       uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0
+       uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
        with:
          args: --verbose
          # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-         version: v2.1.5
+         version: v2.2.1
  fuzzing:
    uses: ./.github/workflows/fuzzing.yml
    if: github.event_name == 'pull_request'
@@ -29,12 +29,12 @@ jobs:
          persist-credentials: false

      - name: Initialize CodeQL
-       uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+       uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
        with:
          languages: ${{ matrix.language }}

      - name: Autobuild
-       uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+       uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16
+       uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
@@ -26,7 +26,7 @@ jobs:
          persist-credentials: false

      - name: "Run analysis"
-       uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # tag=v2.4.1
+       uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # tag=v2.4.2
        with:
          results_file: results.sarif
          results_format: sarif
@@ -45,6 +45,6 @@ jobs:

      # Upload the results to GitHub's code scanning dashboard.
      - name: "Upload to code-scanning"
-       uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # tag=v3.28.16
+       uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # tag=v3.29.2
        with:
          sarif_file: results.sarif
@@ -2,6 +2,8 @@

 ## main / unreleased

+* [BUGFIX] OTLP receiver: Generate `target_info` samples between the earliest and latest samples per resource. #16737
+
 ## 3.5.0 / 2025-07-14

 * [FEATURE] PromQL: Add experimental type and unit metadata labels, behind feature flag `type-and-unit-labels`. #16228 #16632 #16718 #16743
@@ -246,6 +248,11 @@ This release includes new features such as a brand new UI and UTF-8 support enab
 * [BUGFIX] Autoreload: Reload invalid yaml files. #14947
 * [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029

+## 2.53.5 / 2025-06-30
+
+* [ENHANCEMENT] TSDB: Add backward compatibility with the upcoming TSDB block index v3 #16762
+* [BUGFIX] Top-level: Update GOGC before loading TSDB #16521
+
 ## 2.53.4 / 2025-03-18

 * [BUGFIX] Runtime: fix GOGC is being set to 0 when installed with empty prometheus.yml file resulting high cpu usage. #16090
@@ -260,7 +267,7 @@ This release includes new features such as a brand new UI and UTF-8 support enab
 Fix a bug where Prometheus would crash with a segmentation fault if a remote-read
 request accessed a block on disk at about the same time as TSDB created a new block.

-[BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515,#14523
+* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515,#14523

 ## 2.55.1 / 2024-11-04

Makefile
@@ -189,6 +189,6 @@ update-all-go-deps:
 	@$(MAKE) update-go-deps
 	@echo ">> updating Go dependencies in ./documentation/examples/remote_storage/"
 	@cd ./documentation/examples/remote_storage/ && for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-		$(GO) get -d $$m; \
+		$(GO) get $$m; \
 	done
 	@cd ./documentation/examples/remote_storage/ && $(GO) mod tidy
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v2.1.5
+GOLANGCI_LINT_VERSION ?= v2.2.1
 GOLANGCI_FMT_OPTS ?=
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
@@ -139,7 +139,7 @@ common-deps:
 update-go-deps:
 	@echo ">> updating Go dependencies"
 	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-		$(GO) get -d $$m; \
+		$(GO) get $$m; \
 	done
 	$(GO) mod tidy
@@ -67,9 +67,9 @@ Prometheus will now be reachable at <http://localhost:9090/>.

 To build Prometheus from source code, You need:

-* Go [version 1.22 or greater](https://golang.org/doc/install).
-* NodeJS [version 22 or greater](https://nodejs.org/).
-* npm [version 8 or greater](https://www.npmjs.com/).
+* Go: Version specified in [go.mod](./go.mod) or greater.
+* NodeJS: Version specified in [.nvmrc](./web/ui/.nvmrc) or greater.
+* npm: Version 8 or greater (check with `npm --version` and [here](https://www.npmjs.com/)).

 Start by cloning the repository:
@@ -89,6 +89,8 @@ var (
 	lintConfigOptions = append(append([]string{}, lintRulesOptions...), lintOptionTooLongScrapeInterval)
 )

+const httpConfigFileDescription = "HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool"
+
 func main() {
 	var (
 		httpRoundTripper = api.DefaultRoundTripper
@@ -138,11 +140,11 @@ func main() {
 	).Required().ExistingFiles()

 	checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.")
-	checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	checkServerHealthCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
 	checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)

 	checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.")
-	checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	checkServerReadyCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
 	checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)

 	checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.")
@@ -165,7 +167,7 @@ func main() {

 	queryCmd := app.Command("query", "Run query against a Prometheus server.")
 	queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
-	queryCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	queryCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)

 	queryInstantCmd := queryCmd.Command("instant", "Run instant query.")
 	queryInstantCmd.Arg("server", "Prometheus server to query.").Required().URLVar(&serverURL)
@@ -210,7 +212,7 @@ func main() {
 	queryAnalyzeCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().StringsVar(&queryAnalyzeCfg.matchers)

 	pushCmd := app.Command("push", "Push to a Prometheus server.")
-	pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	pushCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
 	pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
 	pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL)
 	metricFiles := pushMetricsCmd.Arg(
@@ -277,7 +279,7 @@ func main() {
 	importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String()
 	importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String()
 	importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.")
-	importRulesCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	importRulesCmd.Flag("http.config.file", httpConfigFileDescription).PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
 	importRulesCmd.Flag("url", "The URL for the Prometheus API with the data where the rule will be backfilled from.").Default("http://localhost:9090").URLVar(&serverURL)
 	importRulesStart := importRulesCmd.Flag("start", "The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required.").
 		Required().String()

config/config.go
@@ -68,11 +68,6 @@ var (
 	}
 )

-const (
-	LegacyValidationConfig = "legacy"
-	UTF8ValidationConfig   = "utf8"
-)
-
 // Load parses the YAML input s into a Config.
 func Load(s string, logger *slog.Logger) (*Config, error) {
 	cfg := &Config{}
@@ -109,10 +104,10 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
 	}

 	switch cfg.OTLPConfig.TranslationStrategy {
-	case UnderscoreEscapingWithSuffixes:
+	case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
 	case "":
 	case NoTranslation, NoUTF8EscapingWithSuffixes:
-		if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
+		if cfg.GlobalConfig.MetricNameValidationScheme == model.LegacyValidation {
 			return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy)
 		}
 	default:
@@ -172,7 +167,7 @@ var (
 		ScrapeProtocols:                DefaultScrapeProtocols,
 		ConvertClassicHistogramsToNHCB: false,
 		AlwaysScrapeClassicHistograms:  false,
-		MetricNameValidationScheme:     UTF8ValidationConfig,
+		MetricNameValidationScheme:     model.UTF8Validation,
 		MetricNameEscapingScheme:       model.AllowUTF8,
 	}
@@ -486,8 +481,8 @@ type GlobalConfig struct {
 	// 0 means no limit.
 	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
 	// Allow UTF8 Metric and Label Names. Can be blank in config files but must
-	// have a value if a ScrapeConfig is created programmatically.
-	MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
+	// have a value if a GlobalConfig is created programmatically.
+	MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
 	// Metric name escaping mode to request through content negotiation. Can be
 	// blank in config files but must have a value if a ScrapeConfig is created
 	// programmatically.
@@ -755,7 +750,7 @@ type ScrapeConfig struct {
 	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
 	// Allow UTF8 Metric and Label Names. Can be blank in config files but must
 	// have a value if a ScrapeConfig is created programmatically.
-	MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"`
+	MetricNameValidationScheme model.ValidationScheme `yaml:"metric_name_validation_scheme,omitempty"`
 	// Metric name escaping mode to request through content negotiation. Can be
 	// blank in config files but must have a value if a ScrapeConfig is created
 	// programmatically.
@@ -882,32 +877,32 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 	}

 	switch globalConfig.MetricNameValidationScheme {
-	case "":
-		globalConfig.MetricNameValidationScheme = UTF8ValidationConfig
-	case LegacyValidationConfig, UTF8ValidationConfig:
+	case model.UnsetValidation:
+		globalConfig.MetricNameValidationScheme = model.UTF8Validation
+	case model.LegacyValidation, model.UTF8Validation:
 	default:
-		return fmt.Errorf("unknown global name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
+		return fmt.Errorf("unknown global name validation method specified, must be either '', 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
 	}
 	// Scrapeconfig validation scheme matches global if left blank.
 	switch c.MetricNameValidationScheme {
-	case "":
+	case model.UnsetValidation:
 		c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme
-	case LegacyValidationConfig, UTF8ValidationConfig:
+	case model.LegacyValidation, model.UTF8Validation:
 	default:
-		return fmt.Errorf("unknown scrape config name validation method specified, must be either 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme)
+		return fmt.Errorf("unknown scrape config name validation method specified, must be either '', 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme)
 	}

 	// Escaping scheme is based on the validation scheme if left blank.
 	switch globalConfig.MetricNameEscapingScheme {
 	case "":
-		if globalConfig.MetricNameValidationScheme == LegacyValidationConfig {
+		if globalConfig.MetricNameValidationScheme == model.LegacyValidation {
 			globalConfig.MetricNameEscapingScheme = model.EscapeUnderscores
 		} else {
 			globalConfig.MetricNameEscapingScheme = model.AllowUTF8
 		}
 	case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
 	default:
-		return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameValidationScheme)
+		return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameEscapingScheme)
 	}

 	if c.MetricNameEscapingScheme == "" {
@@ -916,12 +911,12 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {

 	switch c.MetricNameEscapingScheme {
 	case model.AllowUTF8:
-		if c.MetricNameValidationScheme != UTF8ValidationConfig {
+		if c.MetricNameValidationScheme != model.UTF8Validation {
 			return errors.New("utf8 metric names requested but validation scheme is not set to UTF8")
 		}
 	case model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
 	default:
-		return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameValidationScheme)
+		return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %q", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameEscapingScheme)
 	}

 	if c.ConvertClassicHistogramsToNHCB == nil {
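Aside (not part of the commit): the defaulting rules that `Validate` now implements are easier to see in a short, hypothetical sketch. It assumes the exported `config` and `model` APIs at this revision, and that a mostly-empty `ScrapeConfig` passes the remaining validation steps:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Global config left at its defaults: utf8 validation, allow-utf-8 escaping.
	global := config.DefaultGlobalConfig

	// A scrape config with both schemes unset (the zero values).
	sc := &config.ScrapeConfig{JobName: "example"}

	// Validate fills unset schemes from the global config: an unset
	// validation scheme becomes model.UTF8Validation, and the escaping
	// scheme is then derived from the validation scheme.
	if err := sc.Validate(global); err != nil {
		panic(err)
	}
	fmt.Println(sc.MetricNameValidationScheme == model.UTF8Validation) // true
	fmt.Println(sc.MetricNameEscapingScheme == model.AllowUTF8)        // true
}
```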
@@ -942,26 +937,6 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
 	return discovery.MarshalYAMLWithInlineConfigs(c)
 }

-// ToValidationScheme returns the validation scheme for the given string config value.
-func ToValidationScheme(s string) (validationScheme model.ValidationScheme, err error) {
-	switch s {
-	case "":
-		// This is a workaround for third party exporters that don't set the validation scheme.
-		if DefaultGlobalConfig.MetricNameValidationScheme == "" {
-			return model.UTF8Validation, errors.New("global metric name validation scheme is not set")
-		}
-		return ToValidationScheme(DefaultGlobalConfig.MetricNameValidationScheme)
-	case UTF8ValidationConfig:
-		validationScheme = model.UTF8Validation
-	case LegacyValidationConfig:
-		validationScheme = model.LegacyValidation
-	default:
-		return model.UTF8Validation, fmt.Errorf("invalid metric name validation scheme, %s", s)
-	}
-
-	return validationScheme, nil
-}
-
 // ToEscapingScheme wraps the equivalent common library function with the
 // desired default behavior based on the given validation scheme. This is a
 // workaround for third party exporters that don't set the escaping scheme.
@@ -972,6 +947,10 @@ func ToEscapingScheme(s string, v model.ValidationScheme) (model.EscapingScheme,
 			return model.NoEscaping, nil
 		case model.LegacyValidation:
 			return model.UnderscoreEscaping, nil
+		case model.UnsetValidation:
+			return model.NoEscaping, fmt.Errorf("v is unset: %s", v)
+		default:
+			panic(fmt.Errorf("unhandled validation scheme: %s", v))
 		}
 	}
 	return model.ToEscapingScheme(s)
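A small illustration (not from the commit) of the new `model.UnsetValidation` branch: with an empty scheme string the default is derived from the validation scheme, and an unset scheme now surfaces as an error instead of being guessed:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Legacy validation defaults to underscore escaping.
	scheme, err := config.ToEscapingScheme("", model.LegacyValidation)
	fmt.Println(scheme, err) // underscores <nil>

	// UTF-8 validation defaults to no escaping.
	scheme, err = config.ToEscapingScheme("", model.UTF8Validation)
	fmt.Println(scheme, err) // allow-utf-8 <nil>

	// An unset validation scheme is reported explicitly.
	_, err = config.ToEscapingScheme("", model.UnsetValidation)
	fmt.Println(err) // v is unset: ...
}
```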
@@ -1555,31 +1534,68 @@ func getGoGC() int {
 type translationStrategyOption string

 var (
-	// NoUTF8EscapingWithSuffixes will accept metric/label names as they are.
-	// Unit and type suffixes may be added to metric names, according to certain rules.
+	// NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit
+	// and type suffixes may be added to metric names, according to certain rules.
 	NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
-	// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
-	// This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores,
-	// and label name characters that are not alphanumerics/underscores to underscores.
-	// Unit and type suffixes may be appended to metric names, according to certain rules.
+	// UnderscoreEscapingWithSuffixes is the default option for translating OTLP
+	// to Prometheus. This option will translate metric name characters that are
+	// not alphanumerics/underscores/colons to underscores, and label name
+	// characters that are not alphanumerics/underscores to underscores. Unit and
+	// type suffixes may be appended to metric names, according to certain rules.
 	UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+	// UnderscoreEscapingWithoutSuffixes translates metric name characters that
+	// are not alphanumerics/underscores/colons to underscores, and label name
+	// characters that are not alphanumerics/underscores to underscores, but
+	// unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
+	// the names.
+	UnderscoreEscapingWithoutSuffixes translationStrategyOption = "UnderscoreEscapingWithoutSuffixes"
 	// NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
-	// and label names. This offers a way for the OTLP users to use native metric names, reducing confusion.
+	// and label names. This offers a way for the OTLP users to use native metric
+	// names, reducing confusion.
 	//
 	// WARNING: This setting has significant known risks and limitations (see
-	// https://prometheus.io/docs/practices/naming/ for details):
-	// * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration).
-	// * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed
-	// time series. For instance, you may end up in situation of ingesting `foo.bar` series with unit
-	// `seconds` and a separate series `foo.bar` with unit `milliseconds`.
+	// https://prometheus.io/docs/practices/naming/ for details): * Impaired UX
+	// when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling
+	// configuration). * Series collisions which in the best case may result in
+	// OOO errors, in the worst case a silently malformed time series. For
+	// instance, you may end up in situation of ingesting `foo.bar` series with
+	// unit `seconds` and a separate series `foo.bar` with unit `milliseconds`.
 	//
-	// As a result, this setting is experimental and currently, should not be used in
-	// production systems.
+	// As a result, this setting is experimental and currently, should not be used
+	// in production systems.
 	//
-	// TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks.
+	// TODO(ArthurSens): Mention `type-and-unit-labels` feature
+	// (https://github.com/prometheus/proposals/pull/39) once released, as
+	// potential mitigation of the above risks.
 	NoTranslation translationStrategyOption = "NoTranslation"
 )

+// ShouldEscape returns true if the translation strategy requires that metric
+// names be escaped.
+func (o translationStrategyOption) ShouldEscape() bool {
+	switch o {
+	case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
+		return true
+	case NoTranslation, NoUTF8EscapingWithSuffixes:
+		return false
+	default:
+		return false
+	}
+}
+
+// ShouldAddSuffixes returns a bool deciding whether the given translation
+// strategy should have suffixes added.
+func (o translationStrategyOption) ShouldAddSuffixes() bool {
+	switch o {
+	case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes:
+		return true
+	case UnderscoreEscapingWithoutSuffixes, NoTranslation:
+		return false
+	default:
+		return false
+	}
+}
+
 // OTLPConfig is the configuration for writing to the OTLP endpoint.
 type OTLPConfig struct {
 	PromoteAllResourceAttributes bool `yaml:"promote_all_resource_attributes,omitempty"`
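The two new helpers reduce the four strategies to two independent booleans. A quick sketch (hypothetical usage, not part of the commit) of the resulting matrix — the strategy variables are exported even though the underlying type is not, so their methods are callable from other packages:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Escape: yes, suffixes: yes (the default strategy).
	fmt.Println(config.UnderscoreEscapingWithSuffixes.ShouldEscape())      // true
	fmt.Println(config.UnderscoreEscapingWithSuffixes.ShouldAddSuffixes()) // true

	// Escape: yes, suffixes: no (the strategy added in this commit).
	fmt.Println(config.UnderscoreEscapingWithoutSuffixes.ShouldEscape())      // true
	fmt.Println(config.UnderscoreEscapingWithoutSuffixes.ShouldAddSuffixes()) // false

	// Escape: no, suffixes: yes.
	fmt.Println(config.NoUTF8EscapingWithSuffixes.ShouldEscape())      // false
	fmt.Println(config.NoUTF8EscapingWithSuffixes.ShouldAddSuffixes()) // true

	// Escape: no, suffixes: no.
	fmt.Println(config.NoTranslation.ShouldEscape())      // false
	fmt.Println(config.NoTranslation.ShouldAddSuffixes()) // false
}
```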
@@ -2567,7 +2567,7 @@ func TestGetScrapeConfigs(t *testing.T) {
 			ScrapeInterval:             opts.ScrapeInterval,
 			ScrapeTimeout:              opts.ScrapeTimeout,
 			ScrapeProtocols:            DefaultGlobalConfig.ScrapeProtocols,
-			MetricNameValidationScheme: UTF8ValidationConfig,
+			MetricNameValidationScheme: model.UTF8Validation,
 			MetricNameEscapingScheme:   model.AllowUTF8,

 			MetricsPath: "/metrics",
@@ -2627,7 +2627,7 @@
 			ScrapeInterval:                 model.Duration(60 * time.Second),
 			ScrapeTimeout:                  DefaultGlobalConfig.ScrapeTimeout,
 			ScrapeProtocols:                DefaultGlobalConfig.ScrapeProtocols,
-			MetricNameValidationScheme:     UTF8ValidationConfig,
+			MetricNameValidationScheme:     model.UTF8Validation,
 			MetricNameEscapingScheme:       model.AllowUTF8,
 			AlwaysScrapeClassicHistograms:  boolPtr(false),
 			ConvertClassicHistogramsToNHCB: boolPtr(false),
@@ -2664,7 +2664,7 @@
 			ScrapeInterval:                 model.Duration(15 * time.Second),
 			ScrapeTimeout:                  DefaultGlobalConfig.ScrapeTimeout,
 			ScrapeProtocols:                DefaultGlobalConfig.ScrapeProtocols,
-			MetricNameValidationScheme:     UTF8ValidationConfig,
+			MetricNameValidationScheme:     model.UTF8Validation,
 			MetricNameEscapingScheme:       model.AllowUTF8,
 			AlwaysScrapeClassicHistograms:  boolPtr(false),
 			ConvertClassicHistogramsToNHCB: boolPtr(false),
@@ -2788,27 +2788,27 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
 	tests := []struct {
 		name         string
 		inputFile    string
-		expectScheme string
+		expectScheme model.ValidationScheme
 	}{
 		{
 			name:         "blank config implies default",
 			inputFile:    "scrape_config_default_validation_mode",
-			expectScheme: "utf8",
+			expectScheme: model.UTF8Validation,
 		},
 		{
 			name:         "global setting implies local settings",
 			inputFile:    "scrape_config_global_validation_mode",
-			expectScheme: "legacy",
+			expectScheme: model.LegacyValidation,
 		},
 		{
 			name:         "local setting",
 			inputFile:    "scrape_config_local_validation_mode",
-			expectScheme: "legacy",
+			expectScheme: model.LegacyValidation,
 		},
 		{
 			name:         "local setting overrides global setting",
 			inputFile:    "scrape_config_local_global_validation_mode",
-			expectScheme: "utf8",
+			expectScheme: model.UTF8Validation,
 		},
 	}
@@ -2832,31 +2832,31 @@ func TestScrapeConfigNameEscapingSettings(t *testing.T) {
 	tests := []struct {
 		name                   string
 		inputFile              string
-		expectValidationScheme string
+		expectValidationScheme model.ValidationScheme
 		expectEscapingScheme   string
 	}{
 		{
 			name:                   "blank config implies default",
 			inputFile:              "scrape_config_default_validation_mode",
-			expectValidationScheme: "utf8",
+			expectValidationScheme: model.UTF8Validation,
 			expectEscapingScheme:   "allow-utf-8",
 		},
 		{
 			name:                   "global setting implies local settings",
 			inputFile:              "scrape_config_global_validation_mode",
-			expectValidationScheme: "legacy",
+			expectValidationScheme: model.LegacyValidation,
 			expectEscapingScheme:   "dots",
 		},
 		{
 			name:                   "local setting",
 			inputFile:              "scrape_config_local_validation_mode",
-			expectValidationScheme: "legacy",
+			expectValidationScheme: model.LegacyValidation,
 			expectEscapingScheme:   "values",
 		},
 		{
 			name:                   "local setting overrides global setting",
 			inputFile:              "scrape_config_local_global_validation_mode",
-			expectValidationScheme: "utf8",
+			expectValidationScheme: model.UTF8Validation,
 			expectEscapingScheme:   "dots",
 		},
 	}
@@ -57,6 +57,8 @@ func (p *Provider) Discoverer() Discoverer {

 // IsStarted returns true if Discoverer is started.
 func (p *Provider) IsStarted() bool {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
 	return p.cancel != nil
 }
@@ -216,15 +218,22 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
 		newProviders []*Provider
 	)
 	for _, prov := range m.providers {
-		// Cancel obsolete providers.
-		if len(prov.newSubs) == 0 {
+		// Cancel an obsolete provider if it has no new subs and it has a cancel function.
+		// prov.cancel != nil is the same check as we use in IsStarted() method but we don't call IsStarted
+		// here because it would take a lock and we need the same lock ourselves for other reads.
+		prov.mu.RLock()
+		if len(prov.newSubs) == 0 && prov.cancel != nil {
 			wg.Add(1)
 			prov.done = func() {
 				wg.Done()
 			}

 			prov.cancel()
+			prov.mu.RUnlock()
 			continue
 		}
+		prov.mu.RUnlock()

 		newProviders = append(newProviders, prov)
 		// refTargets keeps reference targets used to populate new subs' targets as they should be the same.
 		var refTargets map[string]*targetgroup.Group
@@ -298,7 +307,9 @@ func (m *Manager) startProvider(ctx context.Context, p *Provider) {
 	ctx, cancel := context.WithCancel(ctx)
 	updates := make(chan []*targetgroup.Group)

+	p.mu.Lock()
 	p.cancel = cancel
+	p.mu.Unlock()

 	go p.d.Run(ctx, updates)
 	go m.updater(ctx, p, updates)
@@ -306,16 +317,20 @@

 // cleaner cleans resources associated with provider.
 func (m *Manager) cleaner(p *Provider) {
-	p.mu.RLock()
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
 	m.targetsMtx.Lock()
 	for s := range p.subs {
 		delete(m.targets, poolKey{s, p.name})
 	}
 	m.targetsMtx.Unlock()
-	p.mu.RUnlock()
 	if p.done != nil {
 		p.done()
 	}

+	// Provider was cleaned so mark it as down.
+	p.cancel = nil
 }

 func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targetgroup.Group) {
@@ -350,8 +365,10 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ

 func (m *Manager) sender() {
 	ticker := time.NewTicker(m.updatert)
-	defer ticker.Stop()
+	defer func() {
+		ticker.Stop()
+		close(m.syncCh)
+	}()
 	for {
 		select {
 		case <-m.ctx.Done():
@@ -380,9 +397,11 @@ func (m *Manager) cancelDiscoverers() {
 	m.mtx.RLock()
 	defer m.mtx.RUnlock()
 	for _, p := range m.providers {
+		p.mu.RLock()
 		if p.cancel != nil {
 			p.cancel()
 		}
+		p.mu.RUnlock()
 	}
 }
@@ -491,19 +510,3 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 	}
 	return failed
 }
-
-// StaticProvider holds a list of target groups that never change.
-type StaticProvider struct {
-	TargetGroups []*targetgroup.Group
-}
-
-// Run implements the Worker interface.
-func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
-	// We still have to consider that the consumer exits right away in which case
-	// the context will be canceled.
-	select {
-	case ch <- sd.TargetGroups:
-	case <-ctx.Done():
-	}
-	close(ch)
-}
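Taken together, the discovery-manager hunks above make every access to `Provider.cancel` go through `Provider.mu`. A condensed sketch of the pattern (simplified names; an illustration of the technique, not the actual types):

```go
package providersketch

import (
	"context"
	"sync"
)

// provider condenses the fields of discovery.Provider that matter here.
type provider struct {
	mu     sync.RWMutex
	cancel context.CancelFunc
}

// isStarted mirrors IsStarted: reads of cancel take the read lock.
func (p *provider) isStarted() bool {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.cancel != nil
}

// start mirrors startProvider: writes to cancel take the write lock.
func (p *provider) start(ctx context.Context) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	p.mu.Lock()
	p.cancel = cancel
	p.mu.Unlock()
	return ctx
}

// clean mirrors cleaner: it also nils cancel so the provider reads as
// down, which lets a concurrent ApplyConfig skip already-cancelled
// providers instead of hanging on them.
func (p *provider) clean() {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.cancel != nil {
		p.cancel()
		p.cancel = nil
	}
}
```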
@@ -1562,3 +1562,53 @@ func TestUnregisterMetrics(t *testing.T) {
 		cancel()
 	}
 }
+
+// Calling ApplyConfig() that removes providers at the same time as shutting down
+// the manager should not hang.
+func TestConfigReloadAndShutdownRace(t *testing.T) {
+	reg := prometheus.NewRegistry()
+	_, sdMetrics := NewTestMetrics(t, reg)
+
+	mgrCtx, mgrCancel := context.WithCancel(context.Background())
+	discoveryManager := NewManager(mgrCtx, promslog.NewNopLogger(), reg, sdMetrics)
+	require.NotNil(t, discoveryManager)
+	discoveryManager.updatert = 100 * time.Millisecond
+
+	var wgDiscovery sync.WaitGroup
+	wgDiscovery.Add(1)
+	go func() {
+		discoveryManager.Run()
+		wgDiscovery.Done()
+	}()
+	time.Sleep(time.Millisecond * 200)
+
+	var wgBg sync.WaitGroup
+	updateChan := discoveryManager.SyncCh()
+	wgBg.Add(1)
+	ctx, cancel := context.WithCancel(context.Background())
+	go func() {
+		defer wgBg.Done()
+		select {
+		case <-ctx.Done():
+			return
+		case <-updateChan:
+		}
+	}()
+
+	c := map[string]Configs{
+		"prometheus": {staticConfig("bar:9090")},
+	}
+	discoveryManager.ApplyConfig(c)
+
+	delete(c, "prometheus")
+	wgBg.Add(1)
+	go func() {
+		discoveryManager.ApplyConfig(c)
+		wgBg.Done()
+	}()
+	mgrCancel()
+	wgDiscovery.Wait()
+
+	cancel()
+	wgBg.Wait()
+}
@@ -266,7 +266,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
 func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
 	err := rmm.Register()
 	if err != nil {
-		return nil, errors.New("failed to create service discovery refresh metrics")
+		return nil, fmt.Errorf("failed to create service discovery refresh metrics: %w", err)
 	}

 	metrics := make(map[string]DiscovererMetrics)
@@ -274,7 +274,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
 		currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
 		err = currentSdMetrics.Register()
 		if err != nil {
-			return nil, errors.New("failed to create service discovery metrics")
+			return nil, fmt.Errorf("failed to create service discovery metrics: %w", err)
 		}
 		metrics[conf.Name()] = currentSdMetrics
 	}
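The switch from `errors.New` to `fmt.Errorf` with `%w` in the two hunks above keeps the underlying registration error in the chain, so callers can still inspect the cause. A minimal illustration (hypothetical sentinel error, not from the codebase):

```go
package main

import (
	"errors"
	"fmt"
)

// errAlreadyRegistered stands in for whatever rmm.Register() might return.
var errAlreadyRegistered = errors.New("collector already registered")

func register() error { return errAlreadyRegistered }

func main() {
	if err := register(); err != nil {
		wrapped := fmt.Errorf("failed to create service discovery refresh metrics: %w", err)
		fmt.Println(wrapped)                                   // message keeps the cause
		fmt.Println(errors.Is(wrapped, errAlreadyRegistered))  // true: cause survives wrapping
	}
}
```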
@@ -142,7 +142,7 @@ Check if the Prometheus server is healthy.

 | Flag | Description | Default |
 | --- | --- | --- |
-| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
 | <code class="text-nowrap">--url</code> | The URL for the Prometheus server. | `http://localhost:9090` |
@@ -158,7 +158,7 @@ Check if the Prometheus server is ready.

 | Flag | Description | Default |
 | --- | --- | --- |
-| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
 | <code class="text-nowrap">--url</code> | The URL for the Prometheus server. | `http://localhost:9090` |
@@ -213,7 +213,7 @@ Run query against a Prometheus server.
 | Flag | Description | Default |
 | --- | --- | --- |
 | <code class="text-nowrap">-o</code>, <code class="text-nowrap">--format</code> | Output format of the query. | `promql` |
-| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
@@ -404,7 +404,7 @@ Push to a Prometheus server.

 | Flag | Description |
 | --- | --- |
-| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool |
@@ -672,7 +672,7 @@ Create blocks of data for new recording rules.

 | Flag | Description | Default |
 | --- | --- | --- |
-| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file, see details at https://prometheus.io/docs/prometheus/latest/configuration/promtool | |
 | <code class="text-nowrap">--url</code> | The URL for the Prometheus API with the data where the rule will be backfilled from. | `http://localhost:9090` |
 | <code class="text-nowrap">--start</code> | The time to start backfilling the new rule from. Must be a RFC3339 formatted date or Unix timestamp. Required. | |
 | <code class="text-nowrap">--end</code> | If an end time is provided, all recording rules in the rule files provided will be backfilled to the end time. Default will backfill up to 3 hours ago. Must be a RFC3339 formatted date or Unix timestamp. | |
@@ -197,6 +197,11 @@ otlp:
   # - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus.
   #   It preserves all special characters like dots, but still adds required metric name suffixes
   #   for units and _total, as UnderscoreEscapingWithSuffixes does.
+  # - "UnderscoreEscapingWithoutSuffixes" translates metric name characters that
+  #   are not alphanumerics/underscores/colons to underscores, and label name
+  #   characters that are not alphanumerics/underscores to underscores, but
+  #   unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
+  #   the names.
   # - (EXPERIMENTAL) "NoTranslation" is a mode that relies on UTF-8 support in Prometheus.
   #   It preserves all special character like dots and won't append special suffixes for metric
   #   unit and type.
@@ -257,7 +262,7 @@ job_name: <job_name>
 # OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
 [ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]

-# Fallback protocol to use if a scrape returns blank, unparseable, or otherwise
+# Fallback protocol to use if a scrape returns blank, unparsable, or otherwise
 # invalid Content-Type.
 # Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
 # OpenMetricsText1.0.0, PrometheusText0.0.4, PrometheusText1.0.0.
@ -0,0 +1,175 @@
|
||||||
|
---
|
||||||
|
title: HTTP configuration for promtool
|
||||||
|
sort_rank: 6
|
||||||
|
---
|
||||||
|
|
||||||
|
Promtool is a versatile CLI tool for Prometheus that supports validation, debugging, querying, unit testing, tsdb management, pushing data, and experimental PromQL editing.
|
||||||
|
|
||||||
|
Prometheus supports basic authentication and TLS. Since promtool needs to connect to Prometheus, we need to provide the authentication details. To specify those authentication details, use the `--http.config.file` for all requests that need to communicate with Prometheus.
|
||||||
|
For instance, if you would like to check whether your local Prometheus server is healthy, you would use:
|
||||||
|
```bash
|
||||||
|
promtool check healthy --url=http://localhost:9090 --http.config.file=http-config-file.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
The file is written in [YAML format](https://en.wikipedia.org/wiki/YAML), defined by the schema described below.
Brackets indicate that a parameter is optional. For non-list parameters the value is set to the specified default.

The file is read upon every HTTP request, so any change in the
configuration and the certificates is picked up immediately.

Generic placeholders are defined as follows:

* `<bool>`: a boolean that can take the values `true` or `false`
* `<filename>`: a valid path to a file
* `<secret>`: a regular string that is a secret, such as a password
* `<string>`: a regular string

A valid example file can be found [here](/documentation/examples/promtool-http-config-file.yml).

```yaml
# Note that `basic_auth` and `authorization` options are mutually exclusive.

# Sets the `Authorization` header with the configured username and password.
# `username_ref` and `password_ref` refer to the name of the secret within the secret manager.
# `password`, `password_file` and `password_ref` are mutually exclusive.
basic_auth:
  [ username: <string> ]
  [ username_file: <filename> ]
  [ username_ref: <string> ]
  [ password: <secret> ]
  [ password_file: <filename> ]
  [ password_ref: <string> ]

# Optional `Authorization` header configuration.
authorization:
  # Sets the authentication type.
  [ type: <string> | default: Bearer ]
  # Sets the credentials. It is mutually exclusive with
  # `credentials_file`.
  [ credentials: <secret> ]
  # Sets the credentials with the credentials read from the configured file.
  # It is mutually exclusive with `credentials`.
  [ credentials_file: <filename> ]
  [ credentials_ref: <string> ]

# Optional OAuth 2.0 configuration.
# Cannot be used at the same time as basic_auth or authorization.
oauth2:
  [ <oauth2> ]

tls_config:
  [ <tls_config> ]

[ follow_redirects: <bool> | default: true ]

# Whether to enable HTTP2.
[ enable_http2: <bool> | default: true ]

# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
[ proxy_from_environment: <bool> ]
[ proxy_connect_header:
  [ <string>: [ <secret>, ... ] ] ]

# `http_headers` specifies a set of headers that will be injected into each request.
http_headers:
  [ <string>: <header> ]
```
## \<oauth2\>

OAuth 2.0 authentication using the client credentials grant type.

```yaml
# `client_id` and `client_secret` are used to authenticate your
# application with the authorization server in order to get
# an access token.
# `client_secret`, `client_secret_file` and `client_secret_ref` are mutually exclusive.
client_id: <string>
[ client_secret: <secret> ]
[ client_secret_file: <filename> ]
[ client_secret_ref: <string> ]

# `scopes` specify the reason for the resource access.
scopes:
  [ - <string> ... ]

# The URL to fetch the token from.
token_url: <string>

# Optional parameters to append to the token URL.
[ endpoint_params:
  <string>: <string> ... ]

# Configures the token request's TLS settings.
tls_config:
  [ <tls_config> ]

# Optional proxy URL.
[ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names
# that should be excluded from proxying. IP and domain names can
# contain port numbers.
[ no_proxy: <string> ]
[ proxy_from_environment: <bool> ]
[ proxy_connect_header:
  [ <string>: [ <secret>, ... ] ] ]
```
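A hedged example of the `<oauth2>` block (the endpoint, client ID, and secret path are placeholders):

```yaml
oauth2:
  client_id: promtool
  client_secret_file: /etc/promtool/oauth2.secret
  token_url: https://auth.example.com/oauth2/token
  scopes:
    - read
```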
## \<tls_config\>

```yaml
# For the following configurations, use either `ca`, `cert` and `key`, or
# `ca_file`, `cert_file` and `key_file`, or `ca_ref`, `cert_ref` and `key_ref`.

# Text of the CA certificate to use for the server.
[ ca: <string> ]
# CA certificate to validate the server certificate with.
[ ca_file: <filename> ]
# `ca_ref` is the name of the secret within the secret manager to use as the CA cert.
[ ca_ref: <string> ]

# Text of the client cert file for the server.
[ cert: <string> ]
# Certificate file for client certificate authentication.
[ cert_file: <filename> ]
# `cert_ref` is the name of the secret within the secret manager to use as the client certificate.
[ cert_ref: <string> ]

# Text of the client key file for the server.
[ key: <secret> ]
# Key file for client certificate authentication.
[ key_file: <filename> ]
# `key_ref` is the name of the secret within the secret manager to use as the client key.
[ key_ref: <string> ]

# ServerName extension to indicate the name of the server.
# http://tools.ietf.org/html/rfc4366#section-3.1
[ server_name: <string> ]

# Disable validation of the server certificate.
[ insecure_skip_verify: <bool> ]

# Minimum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS
# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3).
# If unset, promtool will use Go default minimum version, which is TLS 1.2.
# See MinVersion in https://pkg.go.dev/crypto/tls#Config.
[ min_version: <string> ]
# Maximum acceptable TLS version. Accepted values: TLS10 (TLS 1.0), TLS11 (TLS
# 1.1), TLS12 (TLS 1.2), TLS13 (TLS 1.3).
# If unset, promtool will use Go default maximum version, which is TLS 1.3.
# See MaxVersion in https://pkg.go.dev/crypto/tls#Config.
[ max_version: <string> ]
```
## \<header\>

`header` represents the configuration for a single HTTP header.

```yaml
[ values:
  [ - <string> ... ] ]

[ secrets:
  [ - <secret> ... ] ]

[ files:
  [ - <filename> ... ] ]
```
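As a hedged illustration of the `http_headers` and `<header>` schema above (the header name and value are hypothetical):

```yaml
http_headers:
  X-Scope-OrgID:
    values:
      - tenant-1
```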
@ -57,6 +57,8 @@ If functions are used in a pipeline, the pipeline value is passed as the last argument.
| humanizePercentage | number or string | string | Converts a ratio value to a fraction of 100. |
| humanizeTimestamp | number or string | string | Converts a Unix timestamp in seconds to a more readable format. |
| toTime | number or string | *time.Time | Converts a Unix timestamp in seconds to a time.Time. |
| toDuration | number or string | *time.Duration | Converts a duration in seconds to a time.Duration. |
| now | none | float64 | Returns the Unix timestamp in seconds at the time of the template evaluation. |

Humanizing functions are intended to produce reasonable output for consumption
by humans, and are not guaranteed to return the same results between Prometheus
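For instance, the new functions compose in pipelines (a sketch; actual output depends on the evaluation time):

    {{ now | humanizeTimestamp }}
    {{ 90 | toDuration }}

The first line renders the current time in a human-readable form; the second renders as `1m30s`.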
@ -181,6 +181,8 @@ This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
Enabling this _can_ have negative impact on performance, because the in-memory
state is mutex guarded. Cumulative-only OTLP requests are not affected.

[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor

## PromQL arithmetic expressions in time durations

`--enable-feature=promql-duration-expr`
@ -203,6 +205,12 @@ When using offset with duration expressions, you must wrap the expression in
parentheses. Without parentheses, only the first duration value will be used in
the offset calculation.

`step()` can be used in duration expressions.
For a **range query**, it resolves to the step width of the range query.
For an **instant query**, it resolves to `0s`.

`min(<duration>, <duration>)` and `max(<duration>, <duration>)` can be used to find the minimum or maximum of two duration expressions.

**Note**: Duration expressions are not supported in the @ timestamp operator.

The following operators are supported:

@ -216,14 +224,16 @@ The following operators are supported:

Examples of equivalent durations:

* `5m * 2` is the equivalent to `10m` or `600s`
* `5m * 2` is equivalent to `10m` or `600s`
* `10m - 1m` is the equivalent to `9m` or `540s`
* `10m - 1m` is equivalent to `9m` or `540s`
* `(5+2) * 1m` is the equivalent to `7m` or `420s`
* `(5+2) * 1m` is equivalent to `7m` or `420s`
* `1h / 2` is the equivalent to `30m` or `1800s`
* `1h / 2` is equivalent to `30m` or `1800s`
* `4h % 3h` is the equivalent to `1h` or `3600s`
* `4h % 3h` is equivalent to `1h` or `3600s`
* `(2 ^ 3) * 1m` is the equivalent to `8m` or `480s`
* `(2 ^ 3) * 1m` is equivalent to `8m` or `480s`
* `step() + 1` is equivalent to the query step width increased by 1s.
* `max(step(), 5s)` is equivalent to the larger of the query step width and `5s`.
* `min(2 * step() + 5s, 5m)` is equivalent to the smaller of twice the query step increased by `5s` and `5m`.
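A hedged usage sketch (assuming the feature flag is enabled; `http_requests_total` is a hypothetical metric):

    rate(http_requests_total[5m * 2])           # same range as [10m]
    http_requests_total offset (1h - 10m)       # parentheses required with offset
    rate(http_requests_total[max(step(), 1m)])  # range never narrower than 1m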
[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor

## OTLP Native Delta Support
|
|
|
@ -25,47 +25,58 @@ and vector/vector value pairs. They follow the usual [IEEE 754 floating point
|
||||||
arithmetic](https://en.wikipedia.org/wiki/IEEE_754), including the handling of
|
arithmetic](https://en.wikipedia.org/wiki/IEEE_754), including the handling of
|
||||||
special values like `NaN`, `+Inf`, and `-Inf`.
|
special values like `NaN`, `+Inf`, and `-Inf`.
|
||||||
|
|
||||||
**Between two scalars**, the behavior is obvious: they evaluate to another
|
**Between two scalars**, the behavior is straightforward: they evaluate to another
|
||||||
scalar that is the result of the operator applied to both scalar operands.
|
scalar that is the result of the operator applied to both scalar operands.
|
||||||
|
|
||||||
**Between an instant vector and a scalar**, the operator is applied to the
|
**Between an instant vector and a scalar**, the operator is applied to the
|
||||||
value of every data sample in the vector. If the data sample is a float, the
|
value of every data sample in the vector.
|
||||||
operation performed on the data sample is again obvious, e.g. if an instant
|
|
||||||
vector of float samples is multiplied by 2, the result is another vector of
|
If the data sample is a float, the operation is performed between that float and the scalar.
|
||||||
float samples in which every sample value of the original vector is multiplied
|
For example, if an instant vector of float samples is multiplied by 2,
|
||||||
by 2. For vector elements that are histogram samples, the behavior is the
|
the result is another vector of float samples in which every sample value of
|
||||||
following: For `*`, all bucket populations and the count and the sum of
|
the original vector is multiplied by 2.
|
||||||
observations are multiplied by the scalar. For `/`, the histogram sample has to
|
|
||||||
be on the left hand side (LHS), followed by the scalar on the right hand side
|
For vector elements that are histogram samples, the behavior is the
|
||||||
(RHS). All bucket populations and the count and the sum of observations are
|
following:
|
||||||
then divided by the scalar. A division by zero results in a histogram with no
|
|
||||||
regular buckets and the zero bucket population and the count and sum of
|
* For `*`, all bucket populations and the count and the sum of observations
|
||||||
observations all set to +Inf, -Inf, or NaN, depending on their values in the
|
are multiplied by the scalar.
|
||||||
input histogram (positive, negative, or zero/NaN, respectively). For `/` with a
|
|
||||||
scalar on the LHS and a histogram sample on the RHS, and similarly for all
|
* For `/`, the histogram sample has to be on the left hand side (LHS), followed
|
||||||
other arithmetic binary operators in any combination of a scalar and a
|
by the scalar on the right hand side (RHS). All bucket populations and the count
|
||||||
histogram sample, there is no result and the corresponding element is removed
|
and the sum of observations are then divided by the scalar. A division by zero
|
||||||
from the resulting vector. Such a removal is flagged by an info-level
|
results in a histogram with no regular buckets and the zero bucket population
|
||||||
annotation.
|
and the count and sum of observations all set to `+Inf`, `-Inf`, or `NaN`, depending
|
||||||
|
on their values in the input histogram (positive, negative, or zero/`NaN`, respectively).
|
||||||
|
|
||||||
|
* For `/` with a scalar on the LHS and a histogram sample on the RHS, and similarly for all
|
||||||
|
other arithmetic binary operators in any combination of a scalar and a
|
||||||
|
histogram sample, there is no result and the corresponding element is removed
|
||||||
|
from the resulting vector. Such a removal is flagged by an info-level
|
||||||
|
annotation.
|
||||||
|
|
||||||
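For example (a sketch; the metric name is hypothetical), multiplying a rate vector by a scalar converts a per-second rate into a per-minute rate:

    rate(http_requests_total[5m]) * 60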
**Between two instant vectors**, a binary arithmetic operator is applied to
each entry in the LHS vector and its [matching element](#vector-matching) in
the RHS vector. The result is propagated into the result vector with the
grouping labels becoming the output label set. Entries for which no matching
entry in the right-hand vector can be found are not part of the result. If two
float samples are matched, the behavior is obvious. If a float sample is
matched with a histogram sample, the behavior follows the same logic as between
a scalar and a histogram sample (see above), i.e. `*` and `/` (the latter with
the histogram sample on the LHS) are valid operations, while all others lead to
the removal of the corresponding element from the resulting vector. If two
histogram samples are matched, only `+` and `-` are valid operations, each
adding or substracting all matching bucket populations and the count and the
entry in the right-hand vector can be found are not part of the result.

If two float samples are matched, the arithmetic operator is applied to the two input values.

If a float sample is matched with a histogram sample, the behavior follows the same
logic as between a scalar and a histogram sample (see above), i.e. `*` and `/`
(the latter with the histogram sample on the LHS) are valid operations, while all
others lead to the removal of the corresponding element from the resulting vector.

If two histogram samples are matched, only `+` and `-` are valid operations, each
adding or subtracting all matching bucket populations and the count and the
sum of observations. All other operations result in the removal of the
corresponding element from the output vector, flagged by an info-level
annotation.
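For example (a sketch; both metric names are hypothetical), dividing two matched rate vectors yields a per-series error ratio, with the metric name dropped from the result:

    rate(http_requests_errors_total[5m]) / rate(http_requests_total[5m])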
**In any arithmetic binary operation involving vectors**, the metric name is
dropped.
dropped. This occurs even if `__name__` is explicitly mentioned in `on`
(see https://github.com/prometheus/prometheus/issues/16631 for further discussion).

### Trigonometric binary operators
@ -102,8 +113,8 @@ operators result in another scalar that is either `0` (`false`) or `1`

**Between an instant vector and a scalar**, these operators are applied to the
value of every data sample in the vector, and vector elements between which the
comparison result is `false` get dropped from the result vector. These
comparison result is false get dropped from the result vector. These
operation only work with float samples in the vector. For histogram samples,
operations only work with float samples in the vector. For histogram samples,
the corresponding element is removed from the result vector, flagged by an
info-level annotation.
@ -111,19 +122,33 @@ info-level annotation.
applied to matching entries. Vector elements for which the expression is not
true or which do not find a match on the other side of the expression get
dropped from the result, while the others are propagated into a result vector
with the grouping labels becoming the output label set. Matches between two
float samples work as usual, while matches between a float sample and a
histogram sample are invalid. The corresponding element is removed from the
result vector, flagged by an info-level annotation. Between two histogram
samples, `==` and `!=` work as expected, but all other comparison binary
operations are again invalid.
with the grouping labels becoming the output label set.

Matches between two float samples work as usual.

Matches between a float sample and a histogram sample are invalid, and the
corresponding element is removed from the result vector, flagged by an info-level
annotation.

Between two histogram samples, `==` and `!=` work as expected, but all other
comparison binary operations are again invalid.
**In any comparison binary operation involving vectors**, providing the `bool`
modifier changes the behavior in the following way: Vector elements that would
be dropped instead have the value `0` and vector elements that would be kept
have the value `1`. Additionally, the metric name is dropped. (Note that
invalid operations involving histogram samples still return no result rather
than the value `0`.)
modifier changes the behavior in the following ways:

* Vector elements which find a match on the other side of the expression but for
which the expression is false instead have the value `0` and vector elements
that do find a match and for which the expression is true have the value `1`.
(Note that elements with no match or invalid operations involving histogram
samples still return no result rather than the value `0`.)
* The metric name is dropped.

If the `bool` modifier is not provided, then the metric name from the left side
is retained, with some exceptions:

* If `on` is used, then the metric name is dropped.
* If `group_right` is used, then the metric name from the right side is retained,
to avoid collisions.
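For example (a sketch; the metric is hypothetical):

    http_requests_total > 100        # keeps only series whose value exceeds 100
    http_requests_total > bool 100   # keeps matched series, with value 1 or 0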
@ -259,21 +284,21 @@ Prometheus supports the following built-in aggregation operators that can be
used to aggregate the elements of a single instant vector, resulting in a new
vector of fewer elements with aggregated values:

* `sum` (calculate sum over dimensions)
* `sum(v)` (calculate sum over dimensions)
* `avg` (calculate the arithmetic average over dimensions)
* `avg(v)` (calculate the arithmetic average over dimensions)
* `min` (select minimum over dimensions)
* `min(v)` (select minimum over dimensions)
* `max` (select maximum over dimensions)
* `max(v)` (select maximum over dimensions)
* `bottomk` (smallest _k_ elements by sample value)
* `bottomk(k, v)` (smallest `k` elements by sample value)
* `topk` (largest _k_ elements by sample value)
* `topk(k, v)` (largest `k` elements by sample value)
* `limitk` (sample _k_ elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `limitk(k, v)` (sample `k` elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `limit_ratio` (sample a pseudo-random ratio _r_ of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `limit_ratio(r, v)` (sample a pseudo-random ratio `r` of elements, **experimental**, must be enabled with `--enable-feature=promql-experimental-functions`)
* `group` (all values in the resulting vector are 1)
* `group(v)` (all values in the resulting vector are 1)
* `count` (count number of elements in the vector)
* `count(v)` (count number of elements in the vector)
* `count_values` (count number of elements with the same value)
* `count_values(l, v)` (count number of elements with the same value)

* `stddev` (calculate population standard deviation over dimensions)
* `stddev(v)` (calculate population standard deviation over dimensions)
* `stdvar` (calculate population standard variance over dimensions)
* `stdvar(v)` (calculate population standard variance over dimensions)
* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
* `quantile(φ, v)` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)

These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@ -293,29 +318,62 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.

`parameter` is only required for `topk`, `bottomk`, `limitk`, `limit_ratio`,
`quantile`, and `count_values`. It is used as the value for _k_, _r_, φ, or the
name of the additional label, respectively.

### Detailed explanations

`sum` sums up sample values in the same way as the `+` binary operator does
between two values. Similarly, `avg` divides the sum by the number of
aggregated samples in the same way as the `/` binary operator. Therefore, all
sample values aggregation into a single resulting vector element must either be
#### `sum`

`sum(v)` sums up sample values in `v` in the same way as the `+` binary operator does
between two values.

All sample values being aggregated into a single resulting vector element must either be
float samples or histogram samples. An aggregation of a mix of both is invalid,
resulting in the removeal of the corresponding vector element from the output
resulting in the removal of the corresponding vector element from the output
vector, flagged by a warn-level annotation.
`min` and `max` only operate on float samples, following IEEE 754 floating
##### Examples

If the metric `memory_consumption_bytes` had time series that fan out by
`application`, `instance`, and `group` labels, we could calculate the total
memory consumption per application and group over all instances via:

    sum without (instance) (memory_consumption_bytes)

Which is equivalent to:

    sum by (application, group) (memory_consumption_bytes)

If we are just interested in the total memory consumption in **all**
applications, we could simply write:

    sum(memory_consumption_bytes)

#### `avg`

`avg(v)` divides the sum of `v` by the number of aggregated samples in the same way
as the `/` binary operator.

All sample values being aggregated into a single resulting vector element must either be
float samples or histogram samples. An aggregation of a mix of both is invalid,
resulting in the removal of the corresponding vector element from the output
vector, flagged by a warn-level annotation.

#### `min` and `max`

`min(v)` and `max(v)` return the minimum or maximum value, respectively, in `v`.

They only operate on float samples, following IEEE 754 floating
point arithmetic, which in particular implies that `NaN` is only ever
considered a minimum or maximum if all aggregated values are `NaN`. Histogram
samples in the input vector are ignored, flagged by an info-level annotation.

`topk` and `bottomk` are different from other aggregators in that a subset of
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector. Similar to
`min` and `max`, they only operate on float samples, considering `NaN` values
#### `topk` and `bottomk`

`topk(k, v)` and `bottomk(k, v)` are different from other aggregators in that a subset of
`k` values from the input samples, including the original labels, are returned in the result vector.

`by` and `without` are only used to bucket the input vector.

Similar to `min` and `max`, they only operate on float samples, considering `NaN` values
to be farthest from the top or bottom, respectively. Histogram samples in the
input vector are ignored, flagged by an info-level annotation.
@ -323,72 +381,108 @@ If used in an instant query, `topk` and `bottomk` return series ordered by
value in descending or ascending order, respectively. If used with `by` or
`without`, then series within each bucket are sorted by value, and series in
the same bucket are returned consecutively, but there is no guarantee that
buckets of series will be returned in any particular order. No sorting applies
to range queries.

`limitk` and `limit_ratio` also return a subset of the input samples, including
the original labels in the result vector. The subset is selected in a
deterministic pseudo-random way. `limitk` picks _k_ samples, while
`limit_ratio` picks a ratio _r_ of samples (each determined by `parameter`).
This happens independent of the sample type. Therefore, it works for both float
samples and histogram samples. _r_ can be between +1 and -1. The absolute value
of _r_ is used as the selection ratio, but the selection order is inverted for
a negative _r_, which can be used to select complements. For example,
`limit_ratio(0.1, ...)` returns a deterministic set of approximatiely 10% of
buckets of series will be returned in any particular order.

No sorting applies to range queries.

##### Example

To get the 5 instances with the highest memory consumption across all instances we could write:

    topk(5, memory_consumption_bytes)

#### `limitk` and `limit_ratio`

`limitk(k, v)` returns a subset of `k` input samples, including
the original labels in the result vector.

The subset is selected in a deterministic pseudo-random way.
This happens independent of the sample type.
Therefore, it works for both float samples and histogram samples.

##### Example

To sample 10 timeseries we could write:

    limitk(10, memory_consumption_bytes)

#### `limit_ratio`

`limit_ratio(r, v)` returns a subset of the input samples, including
the original labels in the result vector.

The subset is selected in a deterministic pseudo-random way.
This happens independent of the sample type.
Therefore, it works for both float samples and histogram samples.

`r` can be between +1 and -1. The absolute value of `r` is used as the selection ratio,
but the selection order is inverted for a negative `r`, which can be used to select complements.
For example, `limit_ratio(0.1, ...)` returns a deterministic set of approximately 10% of
the input samples, while `limit_ratio(-0.9, ...)` returns precisely the
remaining approximately 90% of the input samples not returned by
`limit_ratio(0.1, ...)`.
remaining approximately 90% of the input samples not returned by `limit_ratio(0.1, ...)`.
`group` and `count` do not interact with the sample values,
they work in the same way for float samples and histogram samples.
`count_values` outputs one time series per unique sample value. Each series has
an additional label. The name of that label is given by the aggregation
parameter, and the label value is the unique sample value. The value of each
time series is the number of times that sample value was present.
#### `group`

`group(v)` returns 1 for each group that contains any value at that timestamp.

The value may be a float or histogram sample.

#### `count`

`count(v)` returns the number of values at that timestamp, or no value at all
if no values are present at that timestamp.

The value may be a float or histogram sample.

#### `count_values`

`count_values(l, v)` outputs one time series per unique sample value in `v`.
Each series has an additional label, given by `l`, and the label value is the
unique sample value. The value of each time series is the number of times that sample value was present.

`count_values` works with both float samples and histogram samples. For the
latter, a compact string representation of the histogram sample value is used
as the label value.

`stddev` and `stdvar` only work with float samples, following IEEE 754 floating
point arithmetic. Histogram samples in the input vector are ignored, flagged by
an info-level annotation.

`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned.
For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.

### Examples

If the metric `http_requests_total` had time series that fan out by
`application`, `instance`, and `group` labels, we could calculate the total
number of seen HTTP requests per application and group over all instances via:

    sum without (instance) (http_requests_total)

Which is equivalent to:

    sum by (application, group) (http_requests_total)

If we are just interested in the total of HTTP requests we have seen in **all**
applications, we could simply write:

    sum(http_requests_total)

##### Example

To count the number of binaries running each build version we could write:

    count_values("version", build_version)

To get the 5 largest HTTP requests counts across all instances we could write:

    topk(5, http_requests_total)

To sample 10 timeseries, for example to inspect labels and their values, we
could write:

    limitk(10, http_requests_total)

#### `stddev`

`stddev(v)` returns the standard deviation of `v`.

`stddev` only works with float samples, following IEEE 754 floating
point arithmetic. Histogram samples in the input vector are ignored, flagged by
an info-level annotation.

#### `stdvar`

`stdvar(v)` returns the standard variance of `v`.

`stdvar` only works with float samples, following IEEE 754 floating
point arithmetic. Histogram samples in the input vector are ignored, flagged by
an info-level annotation.

#### `quantile`

`quantile(φ, v)` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over.

`quantile` only works with float samples. Histogram samples in the input vector
are ignored, flagged by an info-level annotation.

`NaN` is considered the smallest possible value.

For example, `quantile(0.5, ...)` calculates the median, `quantile(0.95, ...)` the 95th percentile.

Special cases:

* For φ = `NaN`, `NaN` is returned.
* For φ < 0, `-Inf` is returned.
* For φ > 1, `+Inf` is returned.
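For example (a sketch; the metric is hypothetical), the 95th-percentile memory consumption per application across instances:

    quantile by (application) (0.95, memory_consumption_bytes)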
## Binary operator precedence
@ -0,0 +1,12 @@
# TLS and basic authentication configuration example.

# For `basic_auth`, use the Prometheus credentials configured earlier in the `web-config.yml` file.
# The password must be provided in plaintext.
# To avoid including plaintext passwords directly in this file, consider using `password_file` or `password_ref` instead.
basic_auth:
  username: alice
  password: verylongpassword

tls_config:
  cert_file: server.crt
  key_file: server.key
@ -445,6 +445,7 @@ local row = panel.row;
dashboard.new('%(prefix)sOverview' % $._config.grafanaPrometheus)
+ dashboard.time.withFrom('now-1h')
+ dashboard.withTags($._config.grafanaPrometheus.tags)
+ dashboard.withUid('9fa0d141-d019-4ad7-8bc5-42196ee308bd')
+ dashboard.timepicker.withRefreshIntervals($._config.grafanaPrometheus.refresh)
+ dashboard.withVariables(std.prune([
    datasourceVariable,

go.mod
@ -15,8 +15,8 @@ require (
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.152.0
github.com/digitalocean/godo v1.157.0
github.com/docker/docker v28.2.2+incompatible
github.com/docker/docker v28.3.0+incompatible
github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/envoyproxy/protoc-gen-validate v1.2.1
@ -38,19 +38,19 @@ require (
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.18.0
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.52.1
github.com/linode/linodego v1.52.2
github.com/miekg/dns v1.1.66
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.1.0
github.com/oklog/run v1.2.0
github.com/oklog/ulid/v2 v2.1.1
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0
github.com/ovh/go-ovh v1.8.0
github.com/ovh/go-ovh v1.9.0
github.com/prometheus/alertmanager v0.28.1
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.65.0
github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/exporter-toolkit v0.14.0
github.com/prometheus/sigv4 v0.2.0
@ -59,10 +59,10 @@ require (
github.com/stackitcloud/stackit-sdk-go/core v0.17.2
github.com/stretchr/testify v1.10.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/component v1.34.0
go.opentelemetry.io/collector/component v1.35.0
go.opentelemetry.io/collector/consumer v1.34.0
go.opentelemetry.io/collector/consumer v1.35.0
go.opentelemetry.io/collector/pdata v1.34.0
go.opentelemetry.io/collector/pdata v1.35.0
go.opentelemetry.io/collector/processor v1.34.0
go.opentelemetry.io/collector/processor v1.35.0
go.opentelemetry.io/collector/semconv v0.128.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0
@ -81,7 +81,7 @@ require (
golang.org/x/sync v0.15.0
golang.org/x/sys v0.33.0
golang.org/x/text v0.26.0
google.golang.org/api v0.238.0
google.golang.org/api v0.239.0
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
@ -115,8 +115,8 @@ require (
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
go.opentelemetry.io/collector/featuregate v1.34.0 // indirect
go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect
go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
go.opentelemetry.io/otel/log v0.12.2 // indirect
)
@ -153,7 +153,7 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-resty/resty/v2 v2.16.5 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
@ -180,7 +180,7 @@ require (
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/knadh/koanf/maps v0.1.2 // indirect
github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
github.com/knadh/koanf/v2 v2.2.0 // indirect
github.com/knadh/koanf/v2 v2.2.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@ -197,8 +197,8 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@ -206,7 +206,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5
github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
@ -214,9 +214,9 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/collector/confmap v1.34.0 // indirect
go.opentelemetry.io/collector/confmap v1.35.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect
go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
go.opentelemetry.io/collector/pipeline v0.128.0 // indirect
go.opentelemetry.io/collector/pipeline v0.129.0 // indirect
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
@ -225,7 +225,7 @@ require (
golang.org/x/net v0.41.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.33.0 // indirect
golang.org/x/tools v0.34.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

go.sum
@ -110,14 +110,14 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.152.0 h1:WRgkPMogZSXEJK70IkZKTB/PsMn16hMQ+NI3wCIQdzA=
github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
github.com/digitalocean/godo v1.152.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@ -176,8 +176,8 @@ github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@ -314,8 +314,8 @@ github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpb
github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
github.com/knadh/koanf/v2 v2.2.0 h1:FZFwd9bUjpb8DyCWARUBy5ovuhDs1lI87dOEn2K8UVU=
github.com/knadh/koanf/v2 v2.2.1 h1:jaleChtw85y3UdBnI0wCqcg1sj1gPoz6D3caGNHtrNE=
github.com/knadh/koanf/v2 v2.2.0/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
github.com/knadh/koanf/v2 v2.2.1/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -329,8 +329,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.52.1 h1:HJ1cz1n9n3chRP9UrtqmP91+xTi0Q5l+H/4z4tpkwgQ=
github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg=
github.com/linode/linodego v1.52.1/go.mod h1:zEN2sX+cSdp67EuRY1HJiyuLujoa7HqvVwNEcJv3iXw=
github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@ -395,8 +395,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM=
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
@ -405,20 +405,20 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 h1:hZa4FkI2JhYC0tkiwOepnHyyfWzezz3FfCmt88nWJa0=
|
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0/go.mod h1:sLbOuJEFckPdw4li0RtWpoSsMeppcck3s/cmzPyKAgc=
|
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0 h1:+rUULr4xqOJjZK3SokFmRYzsiPq5onoWoSv3He4aaus=
|
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0/go.mod h1:Fh2SXPeFkr4J97w9CV/apFAib8TC9Hi0P08xtiT7Lng=
|
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 h1:8OWwRSdIhm3DY3PEYJ0PtSEz1a1OjL0fghLXSr14JMk=
|
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0/go.mod h1:32OeaysZe4vkSmD1LJ18Q1DfooryYqpSzFNmz+5A5RU=
|
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 h1:9wVFaWEhgV8WQD+nP662nHNaQIkmyF57KRhtsqlaWEI=
|
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY=
|
||||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0/go.mod h1:Yak3vQIvwYQiAO83u+zD9ujdCmpcDL7JSfg2YK+Mwn4=
|
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo=
|
||||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||||
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||||
github.com/ovh/go-ovh v1.8.0 h1:eQ5TAAFZvZAVarQir62oaTL+8a503pIBuOWVn72iGtY=
|
github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
|
||||||
github.com/ovh/go-ovh v1.8.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
|
github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
|
@ -454,14 +454,14 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
|
||||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0=
|
||||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||||
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
|
||||||
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
|
||||||
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
|
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
|
||||||
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
|
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
|
||||||
github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5 h1:LCbPeVKZSu9RS4CsaDCOmDCcribskJ8c6H5u1VvyxY0=
|
github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 h1:QlySqDdSESgWDePeAYskbbcKKdowI26m9aU9zloHyYE=
|
||||||
github.com/prometheus/otlptranslator v0.0.0-20250527173959-2573485683d5/go.mod h1:v1PzmPjSnNkmZSDvKJ9OmsWcmWMEF5+JdllEcXrRfzM=
|
github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
|
||||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||||
|
@ -522,40 +522,40 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
|
||||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||||
go.opentelemetry.io/collector/component v1.34.0 h1:YONg7FaZ5zZbj5cLdARvwtMNuZHunuyxw2fWe5fcWqc=
|
go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ=
|
||||||
go.opentelemetry.io/collector/component v1.34.0/go.mod h1:GvolsSVZskXuyfQdwYacqeBSZe/1tg4RJ0YK55KSvDA=
|
go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc=
|
||||||
go.opentelemetry.io/collector/component/componentstatus v0.128.0 h1:0lEYHgUQEMMkl5FLtMgDH8lue4B3auElQINzGIWUya4=
|
go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw=
|
||||||
go.opentelemetry.io/collector/component/componentstatus v0.128.0/go.mod h1:8vVO6JSV+edmiezJsQzW7aKQ7sFLIN6S3JawKBI646o=
|
go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI=
|
||||||
go.opentelemetry.io/collector/component/componenttest v0.128.0 h1:MGNh5lQQ0Qmz2SmNwOqLJYaWMDkMLYj/51wjMzTBR34=
|
go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI=
|
||||||
go.opentelemetry.io/collector/component/componenttest v0.128.0/go.mod h1:hALNxcacqOaX/Gm/dE7sNOxAEFj41SbRqtvF57Yd6gs=
|
go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM=
|
||||||
go.opentelemetry.io/collector/confmap v1.34.0 h1:PG4sYlLxgCMnA5F7daKXZV+NKjU1IzXBzVQeyvcwyh0=
|
go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ=
|
||||||
go.opentelemetry.io/collector/confmap v1.34.0/go.mod h1:BbAit8+hAJg5vyFBQoDh9vOXOH8UzCdNu91jCh+b72E=
|
go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0=
|
||||||
go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 h1:hcVKU45pjC+PLz7xUc8kwSlR5wsN2w8hs9midZ3ez10=
|
go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk=
|
||||||
go.opentelemetry.io/collector/confmap/xconfmap v0.128.0/go.mod h1:2928x4NAAu1CysfzLbEJE6MSSDB/gOYVq6YRGWY9LmM=
|
go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw=
|
||||||
go.opentelemetry.io/collector/consumer v1.34.0 h1:oBhHH6mgViOGhVDPozE+sUdt7jFBo2Hh32lsSr2L3Tc=
|
go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg=
|
||||||
go.opentelemetry.io/collector/consumer v1.34.0/go.mod h1:DVMCb56ZBlPNcmo0lSJKn3rp18oyZQCedRE4GKIMI+Q=
|
go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs=
|
||||||
go.opentelemetry.io/collector/consumer/consumertest v0.128.0 h1:x50GB0I/QvU3sQuNCap5z/P2cnq2yHoRJ/8awkiT87w=
|
go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU=
|
||||||
go.opentelemetry.io/collector/consumer/consumertest v0.128.0/go.mod h1:Wb3IAbMY/DOIwJPy81PuBiW2GnKoNIz4THE7wfJwovE=
|
go.opentelemetry.io/collector/consumer/consumertest v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY=
|
||||||
go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 h1:4E+KTdCjkRS3SIw0bsv5kpv9XFXHf8x9YiPEuxBVEHY=
|
go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A=
|
||||||
go.opentelemetry.io/collector/consumer/xconsumer v0.128.0/go.mod h1:OmzilL/qbjCzPMHay+WEA7/cPe5xuX7Jbj5WPIpqaMo=
|
go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY=
|
||||||
go.opentelemetry.io/collector/featuregate v1.34.0 h1:zqDHpEYy1UeudrfUCvlcJL2t13dXywrC6lwpNZ5DrCU=
|
go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4=
|
||||||
go.opentelemetry.io/collector/featuregate v1.34.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
|
go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
|
||||||
go.opentelemetry.io/collector/internal/telemetry v0.128.0 h1:ySEYWoY7J8DAYdlw2xlF0w+ODQi3AhYj7TRNflsCbx8=
|
go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA=
|
||||||
go.opentelemetry.io/collector/internal/telemetry v0.128.0/go.mod h1:572B/iJqjauv3aT+zcwnlNWBPqM7+KqrYGSUuOAStrM=
|
go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw=
|
||||||
go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8=
|
go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E=
|
||||||
go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI=
|
go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw=
|
||||||
go.opentelemetry.io/collector/pdata/pprofile v0.128.0 h1:6DEtzs/liqv/ukz2EHbC5OMaj2V6K2pzuj/LaRg2YmY=
|
go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ=
|
||||||
go.opentelemetry.io/collector/pdata/pprofile v0.128.0/go.mod h1:bVVRpz+zKFf1UCCRUFqy8LvnO3tHlXKkdqW2d+Wi/iA=
|
go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE=
|
||||||
go.opentelemetry.io/collector/pdata/testdata v0.128.0 h1:5xcsMtyzvb18AnS2skVtWreQP1nl6G3PiXaylKCZ6pA=
|
go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY=
|
||||||
go.opentelemetry.io/collector/pdata/testdata v0.128.0/go.mod h1:9/VYVgzv3JMuIyo19KsT3FwkVyxbh3Eg5QlabQEUczA=
|
go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY=
|
||||||
go.opentelemetry.io/collector/pipeline v0.128.0 h1:WgNXdFbyf/QRLy5XbO/jtPQosWrSWX/TEnSYpJq8bgI=
|
go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU=
|
||||||
go.opentelemetry.io/collector/pipeline v0.128.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
|
go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
|
||||||
go.opentelemetry.io/collector/processor v1.34.0 h1:5pwXIG12XXxdkJ8F68e2cBEjEnFlCIAZhqEYM7vjkqE=
|
go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs=
|
||||||
go.opentelemetry.io/collector/processor v1.34.0/go.mod h1:VCl4vYj2tdO4APUcr0q6Eh796mqCCsH9Z/gqaPuzlUs=
|
go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM=
|
||||||
go.opentelemetry.io/collector/processor/processortest v0.128.0 h1:xPhOSmGFDGqhC3/nu1BqPSE6EpDPAf1/F+BfaYjDn/8=
|
go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY=
|
||||||
go.opentelemetry.io/collector/processor/processortest v0.128.0/go.mod h1:XXXom+mbAQtrkcvq4Ecd6n8RQoVgcfLe1vrUlr6U2gI=
|
go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc=
|
||||||
go.opentelemetry.io/collector/processor/xprocessor v0.128.0 h1:ObbtdXab0is6bdt4XabsRJZ+SUTuwQjPVlHTbmScfNg=
|
go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc=
|
||||||
go.opentelemetry.io/collector/processor/xprocessor v0.128.0/go.mod h1:/nHXW15nzwSRQ+25Cb+r17he/uMtCEvSOBGqpDbn3Uk=
|
go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00=
|
||||||
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
|
go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
|
||||||
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
|
go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
|
||||||
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw=
|
go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw=
|
||||||
|
@ -675,14 +675,14 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs=
|
google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo=
|
||||||
google.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
|
google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
|
||||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
|
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
|
||||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
|
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||||
|
|
|
@ -6,7 +6,7 @@ require (
|
||||||
github.com/bufbuild/buf v1.51.0
|
github.com/bufbuild/buf v1.51.0
|
||||||
github.com/daixiang0/gci v0.13.6
|
github.com/daixiang0/gci v0.13.6
|
||||||
github.com/gogo/protobuf v1.3.2
|
github.com/gogo/protobuf v1.3.2
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
@ -18,7 +18,7 @@ require (
|
||||||
buf.build/go/bufplugin v0.8.0 // indirect
|
buf.build/go/bufplugin v0.8.0 // indirect
|
||||||
buf.build/go/protoyaml v0.3.1 // indirect
|
buf.build/go/protoyaml v0.3.1 // indirect
|
||||||
buf.build/go/spdx v0.2.0 // indirect
|
buf.build/go/spdx v0.2.0 // indirect
|
||||||
cel.dev/expr v0.21.2 // indirect
|
cel.dev/expr v0.23.0 // indirect
|
||||||
connectrpc.com/connect v1.18.1 // indirect
|
connectrpc.com/connect v1.18.1 // indirect
|
||||||
connectrpc.com/otelconnect v0.7.2 // indirect
|
connectrpc.com/otelconnect v0.7.2 // indirect
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||||
|
@ -97,19 +97,19 @@ require (
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
go.uber.org/zap v1.27.0 // indirect
|
go.uber.org/zap v1.27.0 // indirect
|
||||||
go.uber.org/zap/exp v0.3.0 // indirect
|
go.uber.org/zap/exp v0.3.0 // indirect
|
||||||
golang.org/x/crypto v0.36.0 // indirect
|
golang.org/x/crypto v0.38.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect
|
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect
|
||||||
golang.org/x/mod v0.24.0 // indirect
|
golang.org/x/mod v0.25.0 // indirect
|
||||||
golang.org/x/net v0.38.0 // indirect
|
golang.org/x/net v0.40.0 // indirect
|
||||||
golang.org/x/sync v0.12.0 // indirect
|
golang.org/x/sync v0.15.0 // indirect
|
||||||
golang.org/x/sys v0.31.0 // indirect
|
golang.org/x/sys v0.33.0 // indirect
|
||||||
golang.org/x/term v0.30.0 // indirect
|
golang.org/x/term v0.32.0 // indirect
|
||||||
golang.org/x/text v0.23.0 // indirect
|
golang.org/x/text v0.26.0 // indirect
|
||||||
golang.org/x/tools v0.31.0 // indirect
|
golang.org/x/tools v0.33.0 // indirect
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||||
google.golang.org/grpc v1.70.0 // indirect
|
google.golang.org/grpc v1.73.0 // indirect
|
||||||
google.golang.org/protobuf v1.36.5 // indirect
|
google.golang.org/protobuf v1.36.6 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
pluginrpc.com/pluginrpc v0.5.0 // indirect
|
pluginrpc.com/pluginrpc v0.5.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
|
@ -14,8 +14,8 @@ buf.build/go/protoyaml v0.3.1 h1:ucyzE7DRnjX+mQ6AH4JzN0Kg50ByHHu+yrSKbgQn2D4=
|
||||||
buf.build/go/protoyaml v0.3.1/go.mod h1:0TzNpFQDXhwbkXb/ajLvxIijqbve+vMQvWY/b3/Dzxg=
|
buf.build/go/protoyaml v0.3.1/go.mod h1:0TzNpFQDXhwbkXb/ajLvxIijqbve+vMQvWY/b3/Dzxg=
|
||||||
buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw=
|
buf.build/go/spdx v0.2.0 h1:IItqM0/cMxvFJJumcBuP8NrsIzMs/UYjp/6WSpq8LTw=
|
||||||
buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8=
|
buf.build/go/spdx v0.2.0/go.mod h1:bXdwQFem9Si3nsbNy8aJKGPoaPi5DKwdeEp5/ArZ6w8=
|
||||||
cel.dev/expr v0.21.2 h1:o+Wj235dy4gFYlYin3JsMpp3EEfMrPm/6tdoyjT98S0=
|
cel.dev/expr v0.23.0 h1:wUb94w6OYQS4uXraxo9U+wUAs9jT47Xvl4iPgAwM2ss=
|
||||||
cel.dev/expr v0.21.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
cel.dev/expr v0.23.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||||
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
|
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
|
||||||
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
|
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
|
||||||
connectrpc.com/otelconnect v0.7.2 h1:WlnwFzaW64dN06JXU+hREPUGeEzpz3Acz2ACOmN8cMI=
|
connectrpc.com/otelconnect v0.7.2 h1:WlnwFzaW64dN06JXU+hREPUGeEzpz3Acz2ACOmN8cMI=
|
||||||
|
@ -112,8 +112,8 @@ github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo7
|
||||||
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
|
||||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||||
|
@ -260,25 +260,25 @@ go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
|
||||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
|
||||||
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c=
|
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c=
|
||||||
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
|
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
@ -288,34 +288,34 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
|
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||||
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
|
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
|
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
|
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||||
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
|
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
|
||||||
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
|
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
|
||||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
|
|
@@ -248,6 +248,17 @@ func (ls Labels) WithoutEmpty() Labels {
	return ls
}

+// ByteSize returns the approximate size of the labels in bytes including
+// the two string headers size for name and value.
+// Slice header size is ignored because it should be amortized to zero.
+func (ls Labels) ByteSize() uint64 {
+	var size uint64 = 0
+	for _, l := range ls {
+		size += uint64(len(l.Name)+len(l.Value)) + 2*uint64(unsafe.Sizeof(""))
+	}
+	return size
+}
+
// Equal returns whether the two label sets are equal.
func Equal(ls, o Labels) bool {
	return slices.Equal(ls, o)
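The slice-backed `ByteSize` above charges each label its raw name and value bytes plus two Go string headers (`unsafe.Sizeof("")`, 16 bytes each on 64-bit targets). A minimal, self-contained sketch of the same arithmetic, using a hypothetical two-label set:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Each Go string header is unsafe.Sizeof("") bytes: 16 on 64-bit
	// platforms (pointer + length). A label carries two headers, one
	// for the name and one for the value.
	header := uint64(unsafe.Sizeof(""))

	// Hypothetical label set {env="prod", job="api"}.
	labels := [][2]string{{"env", "prod"}, {"job", "api"}}

	var size uint64
	for _, l := range labels {
		size += uint64(len(l[0])+len(l[1])) + 2*header
	}
	fmt.Println(size) // 13 payload bytes + 4*16 header bytes = 77
}
```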
@@ -417,6 +417,13 @@ func (ls Labels) WithoutEmpty() Labels {
	return ls
}

+// ByteSize returns the approximate size of the labels in bytes.
+// String header size is ignored because it should be amortized to zero.
+// SymbolTable size is also not taken into account.
+func (ls Labels) ByteSize() uint64 {
+	return uint64(len(ls.data))
+}
+
// Equal returns whether the two label sets are equal.
func Equal(a, b Labels) bool {
	if a.syms == b.syms {
@@ -21,6 +21,24 @@ import (
	"github.com/stretchr/testify/require"
)

+var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels.
+	16,
+	0,
+	41,
+	270,
+	271,
+	325,
+}
+
+var expectedByteSize = []uint64{ // Values must line up with testCaseLabels.
+	8,
+	0,
+	8,
+	8,
+	8,
+	32,
+}
+
func TestVarint(t *testing.T) {
	cases := []struct {
		v int
@@ -0,0 +1,27 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build slicelabels
+
+package labels
+
+var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels.
+	72,
+	0,
+	97,
+	326,
+	327,
+	549,
+}
+
+var expectedByteSize = expectedSizeOfLabels // They are identical
@@ -283,6 +283,13 @@ func (ls Labels) WithoutEmpty() Labels {
	return ls
}

+// ByteSize returns the approximate size of the labels in bytes.
+// String header size is ignored because it should be amortized to zero
+// because it may be shared across multiple copies of the Labels.
+func (ls Labels) ByteSize() uint64 {
+	return uint64(len(ls.data))
+}
+
// Equal returns whether the two label sets are equal.
func Equal(ls, o Labels) bool {
	return ls.data == o.data
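The packed implementations (this hunk and the dedupelabels one above) keep every name and value inside a single `data` string, so `ByteSize` reduces to a constant-time `len` instead of the per-label walk of the slice-backed variant. Callers can rely on the method uniformly across build tags; a minimal sketch of memory accounting over many label sets, assuming only the `Labels.ByteSize` method introduced here:

```go
// totalLabelBytes sums the approximate label memory of a set of series.
// It works under any of the three build-tag implementations, since each
// one provides Labels.ByteSize.
func totalLabelBytes(series []Labels) uint64 {
	var total uint64
	for _, ls := range series {
		total += ls.ByteSize()
	}
	return total
}
```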
@@ -0,0 +1,34 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !slicelabels && !dedupelabels
+
+package labels
+
+var expectedSizeOfLabels = []uint64{ // Values must line up with testCaseLabels.
+	12,
+	0,
+	37,
+	266,
+	270,
+	309,
+}
+
+var expectedByteSize = []uint64{ // Values must line up with testCaseLabels.
+	12,
+	0,
+	37,
+	266,
+	270,
+	309,
+}
@@ -26,37 +26,33 @@ import (
	"gopkg.in/yaml.v2"
)

+var (
+	s254 = strings.Repeat("x", 254) // Edge cases for stringlabels encoding.
+	s255 = strings.Repeat("x", 255)
+)
+
+var testCaseLabels = []Labels{
+	FromStrings("t1", "t1", "t2", "t2"),
+	{},
+	FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
+	FromStrings("aaa", "111", "xx", s254),
+	FromStrings("aaa", "111", "xx", s255),
+	FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
+}
+
func TestLabels_String(t *testing.T) {
-	s254 := strings.Repeat("x", 254) // Edge cases for stringlabels encoding.
-	s255 := strings.Repeat("x", 255)
-	cases := []struct {
-		labels   Labels
-		expected string
-	}{
-		{
-			labels:   FromStrings("t1", "t1", "t2", "t2"),
-			expected: "{t1=\"t1\", t2=\"t2\"}",
-		},
-		{
-			labels:   Labels{},
-			expected: "{}",
-		},
-		{
-			labels:   FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
-			expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`,
-		},
-		{
-			labels:   FromStrings("aaa", "111", "xx", s254),
-			expected: `{aaa="111", xx="` + s254 + `"}`,
-		},
-		{
-			labels:   FromStrings("aaa", "111", "xx", s255),
-			expected: `{aaa="111", xx="` + s255 + `"}`,
-		},
+	expected := []string{ // Values must line up with testCaseLabels.
+		"{t1=\"t1\", t2=\"t2\"}",
+		"{}",
+		`{"service.name"="t1", "whatever\\whatever"="t2"}`,
+		`{aaa="111", xx="` + s254 + `"}`,
+		`{aaa="111", xx="` + s255 + `"}`,
+		`{" container"="prometheus", " namespace"="observability-prometheus", __name__="kube_pod_container_status_last_terminated_exitcode", cluster="prod-af-north-0", instance="kube-state-metrics-0:kube-state-metrics:ksm", job="kube-state-metrics/kube-state-metrics", pod="observability-prometheus-0", uid="d3ec90b2-4975-4607-b45d-b9ad64bb417e"}`,
	}
-	for _, c := range cases {
-		str := c.labels.String()
-		require.Equal(t, c.expected, str)
+	require.Len(t, expected, len(testCaseLabels))
+	for i, c := range expected {
+		str := testCaseLabels[i].String()
+		require.Equal(t, c, str)
	}
}
@@ -67,6 +63,44 @@ func BenchmarkString(b *testing.B) {
	}
}

+func TestSizeOfLabels(t *testing.T) {
+	require.Len(t, expectedSizeOfLabels, len(testCaseLabels))
+	for i, c := range expectedSizeOfLabels { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go.
+		var total uint64
+		testCaseLabels[i].Range(func(l Label) {
+			total += SizeOfLabels(l.Name, l.Value, 1)
+		})
+		require.Equal(t, c, total)
+	}
+}
+
+func TestByteSize(t *testing.T) {
+	require.Len(t, expectedByteSize, len(testCaseLabels))
+	for i, c := range expectedByteSize { // Declared in build-tag-specific files, e.g. labels_slicelabels_test.go.
+		require.Equal(t, c, testCaseLabels[i].ByteSize())
+	}
+}
+
+var GlobalTotal uint64 // Encourage the compiler not to elide the benchmark computation.
+
+func BenchmarkSize(b *testing.B) {
+	lb := New(benchmarkLabels...)
+	b.Run("SizeOfLabels", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			var total uint64
+			lb.Range(func(l Label) {
+				total += SizeOfLabels(l.Name, l.Value, 1)
+			})
+			GlobalTotal = total
+		}
+	})
+	b.Run("ByteSize", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			GlobalTotal = lb.ByteSize()
+		}
+	})
+}
+
func TestLabels_MatchLabels(t *testing.T) {
	labels := FromStrings(
		"__name__", "ALERTS",
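The `GlobalTotal` variable in this hunk is the standard Go benchmarking sink: storing each iteration's result in a package-level variable keeps the compiler from proving the work unused and eliding it, which would leave the benchmark timing an empty loop. A minimal sketch of the idiom in isolation (`doWork` is a hypothetical stand-in for the code under test):

```go
package bench

import "testing"

var sink uint64 // package-level sink defeats dead-code elimination

// doWork stands in for the computation being measured (hypothetical).
func doWork() uint64 { return 42 }

func BenchmarkWork(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// Assigning to the package-level variable keeps the compiler
		// from optimizing the call away as unused.
		sink = doWork()
	}
}
```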
@@ -49,7 +49,7 @@ sum without(instance) (rate(requests_total[5m]))

	require.Equal(t, "HighAlert", rg.Rules[2].Alert)
	require.Equal(t, "critical", rg.Rules[2].Labels["severity"])
-	require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[0].Annotations["description"])
+	require.Equal(t, "stuff's happening with {{ $.labels.service }}", rg.Rules[2].Annotations["description"])

	require.Equal(t, "HighAlert2", rg.Rules[3].Alert)
	require.Equal(t, "critical", rg.Rules[3].Labels["severity"])
@@ -254,7 +254,10 @@ func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group)
		select {
		case <-n.stopRequested:
			return
-		case ts := <-tsets:
+		case ts, ok := <-tsets:
+			if !ok {
+				break
+			}
			n.reload(ts)
		}
	}
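The extra `ok` in this hunk uses Go's two-value channel receive: once `tsets` is closed, a plain receive returns the zero-value map immediately and forever, so without the check the loop would keep calling `n.reload` with nil data. A self-contained sketch of the semantics:

```go
package main

import "fmt"

func main() {
	ch := make(chan int, 1)
	ch <- 42
	close(ch)

	v, ok := <-ch
	fmt.Println(v, ok) // 42 true: a buffered value is still delivered

	v, ok = <-ch
	fmt.Println(v, ok) // 0 false: the channel is closed and drained
}
```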
@@ -21,14 +21,25 @@ import (
	"github.com/prometheus/prometheus/promql/parser"
)

-// durationVisitor is a visitor that visits a duration expression and calculates the duration.
-type durationVisitor struct{}
+// durationVisitor is a visitor that calculates the actual value of
+// duration expressions in AST nodes. For example the query
+// "http_requests_total offset (1h / 2)" is represented in the AST
+// as a VectorSelector with OriginalOffset 0 and the duration expression
+// in OriginalOffsetExpr representing (1h / 2). This visitor evaluates
+// such duration expression, setting OriginalOffset to 30m.
+type durationVisitor struct {
+	step time.Duration
+}

+// Visit finds any duration expressions in AST Nodes and modifies the Node to
+// store the concrete value. Note that parser.Walk does NOT traverse the
+// duration expressions such as OriginalOffsetExpr so we make our own recursive
+// call on those to evaluate the result.
func (v *durationVisitor) Visit(node parser.Node, _ []parser.Node) (parser.Visitor, error) {
	switch n := node.(type) {
	case *parser.VectorSelector:
		if n.OriginalOffsetExpr != nil {
-			duration, err := calculateDuration(n.OriginalOffsetExpr, true)
+			duration, err := v.calculateDuration(n.OriginalOffsetExpr, true)
			if err != nil {
				return nil, err
			}
@@ -36,7 +47,7 @@
		}
	case *parser.MatrixSelector:
		if n.RangeExpr != nil {
-			duration, err := calculateDuration(n.RangeExpr, false)
+			duration, err := v.calculateDuration(n.RangeExpr, false)
			if err != nil {
				return nil, err
			}
@@ -44,21 +55,21 @@
		}
	case *parser.SubqueryExpr:
		if n.OriginalOffsetExpr != nil {
-			duration, err := calculateDuration(n.OriginalOffsetExpr, true)
+			duration, err := v.calculateDuration(n.OriginalOffsetExpr, true)
			if err != nil {
				return nil, err
			}
			n.OriginalOffset = duration
		}
		if n.StepExpr != nil {
-			duration, err := calculateDuration(n.StepExpr, false)
+			duration, err := v.calculateDuration(n.StepExpr, false)
			if err != nil {
				return nil, err
			}
			n.Step = duration
		}
		if n.RangeExpr != nil {
-			duration, err := calculateDuration(n.RangeExpr, false)
+			duration, err := v.calculateDuration(n.RangeExpr, false)
			if err != nil {
				return nil, err
			}
@@ -68,9 +79,10 @@
	return v, nil
}

-// calculateDuration computes the duration from a duration expression.
-func calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) {
-	duration, err := evaluateDurationExpr(expr)
+// calculateDuration returns the float value of a duration expression as
+// time.Duration or an error if the duration is invalid.
+func (v *durationVisitor) calculateDuration(expr parser.Expr, allowedNegative bool) (time.Duration, error) {
+	duration, err := v.evaluateDurationExpr(expr)
	if err != nil {
		return 0, err
	}
@@ -84,7 +96,7 @@
}

// evaluateDurationExpr recursively evaluates a duration expression to a float64 value.
-func evaluateDurationExpr(expr parser.Expr) (float64, error) {
+func (v *durationVisitor) evaluateDurationExpr(expr parser.Expr) (float64, error) {
	switch n := expr.(type) {
	case *parser.NumberLiteral:
		return n.Val, nil
@ -93,19 +105,31 @@ func evaluateDurationExpr(expr parser.Expr) (float64, error) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if n.LHS != nil {
|
if n.LHS != nil {
|
||||||
lhs, err = evaluateDurationExpr(n.LHS)
|
lhs, err = v.evaluateDurationExpr(n.LHS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
rhs, err = evaluateDurationExpr(n.RHS)
|
if n.RHS != nil {
|
||||||
|
rhs, err = v.evaluateDurationExpr(n.RHS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
switch n.Op {
|
switch n.Op {
|
||||||
|
case parser.STEP:
|
||||||
|
return float64(v.step.Seconds()), nil
|
||||||
|
case parser.MIN:
|
||||||
|
return math.Min(lhs, rhs), nil
|
||||||
|
case parser.MAX:
|
||||||
|
return math.Max(lhs, rhs), nil
|
||||||
case parser.ADD:
|
case parser.ADD:
|
||||||
|
if n.LHS == nil {
|
||||||
|
// Unary positive duration expression.
|
||||||
|
return rhs, nil
|
||||||
|
}
|
||||||
return lhs + rhs, nil
|
return lhs + rhs, nil
|
||||||
case parser.SUB:
|
case parser.SUB:
|
||||||
if n.LHS == nil {
|
if n.LHS == nil {
|
||||||
|
|
|
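Taken together, the visitor above is a small recursive interpreter over a duration AST, with step() resolving to the query resolution the visitor carries. The following is a minimal standalone sketch of the same evaluation scheme, deliberately outside the real parser types; the names (node, evalSeconds) are illustrative, not Prometheus APIs.

package main

import (
	"fmt"
	"math"
	"time"
)

// node mirrors the shape of a duration expression: a literal, step(),
// min/max, or binary/unary arithmetic. Assumed type, for illustration only.
type node struct {
	op       string // "lit", "step", "min", "max", "add", "sub"
	val      float64
	lhs, rhs *node
}

// evalSeconds resolves an expression to seconds, threading the query step
// through the recursion the way the durationVisitor does.
func evalSeconds(n *node, step time.Duration) float64 {
	switch n.op {
	case "lit":
		return n.val
	case "step":
		return step.Seconds()
	case "min":
		return math.Min(evalSeconds(n.lhs, step), evalSeconds(n.rhs, step))
	case "max":
		return math.Max(evalSeconds(n.lhs, step), evalSeconds(n.rhs, step))
	case "add":
		if n.lhs == nil { // unary plus
			return evalSeconds(n.rhs, step)
		}
		return evalSeconds(n.lhs, step) + evalSeconds(n.rhs, step)
	case "sub":
		if n.lhs == nil { // unary minus
			return -evalSeconds(n.rhs, step)
		}
		return evalSeconds(n.lhs, step) - evalSeconds(n.rhs, step)
	}
	return 0
}

func main() {
	// min(step(), 300s) with a 1m query step resolves to 60 seconds.
	expr := &node{op: "min", lhs: &node{op: "step"}, rhs: &node{op: "lit", val: 300}}
	fmt.Println(evalSeconds(expr, time.Minute)) // 60
}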
@@ -195,6 +195,24 @@ func TestCalculateDuration(t *testing.T) {
 			expected:        -5 * time.Second,
 			allowedNegative: true,
 		},
+		{
+			name: "step",
+			expr: &parser.DurationExpr{
+				Op: parser.STEP,
+			},
+			expected: 1 * time.Second,
+		},
+		{
+			name: "step multiplication",
+			expr: &parser.DurationExpr{
+				LHS: &parser.DurationExpr{
+					Op: parser.STEP,
+				},
+				RHS: &parser.NumberLiteral{Val: 3},
+				Op:  parser.MUL,
+			},
+			expected: 3 * time.Second,
+		},
 		{
 			name: "division by zero",
 			expr: &parser.DurationExpr{
@@ -225,7 +243,8 @@ func TestCalculateDuration(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			result, err := calculateDuration(tt.expr, tt.allowedNegative)
+			v := &durationVisitor{step: 1 * time.Second}
+			result, err := v.calculateDuration(tt.expr, tt.allowedNegative)
 			if tt.errorMessage != "" {
 				require.Error(t, err)
 				require.Contains(t, err.Error(), tt.errorMessage)
@@ -86,11 +86,6 @@ type engineMetrics struct {
 	querySamples prometheus.Counter
 }

-// convertibleToInt64 returns true if v does not over-/underflow an int64.
-func convertibleToInt64(v float64) bool {
-	return v <= maxInt64 && v >= minInt64
-}
-
 type (
 	// ErrQueryTimeout is returned if a query timed out during processing.
 	ErrQueryTimeout string
@@ -134,7 +129,7 @@ type QueryLogger interface {
 	io.Closer
 }

-// A Query is derived from an a raw query string and can be run against an engine
+// A Query is derived from a raw query string and can be run against an engine
 // it is associated with.
 type Query interface {
 	// Exec processes the query. Can only be called once.
@@ -481,7 +476,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {

 // NewInstantQuery returns an evaluation query for the given expression at the given time.
 func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error) {
-	pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0)
+	pExpr, qry := ng.newQuery(q, qs, opts, ts, ts, 0*time.Second)
 	finishQueue, err := ng.queueActive(ctx, qry)
 	if err != nil {
 		return nil, err
@@ -494,7 +489,7 @@ func (ng *Engine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts
 	if err := ng.validateOpts(expr); err != nil {
 		return nil, err
 	}
-	*pExpr, err = PreprocessExpr(expr, ts, ts)
+	*pExpr, err = PreprocessExpr(expr, ts, ts, 0)

 	return qry, err
 }
@@ -518,7 +513,7 @@ func (ng *Engine) NewRangeQuery(ctx context.Context, q storage.Queryable, opts Q
 	if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar {
 		return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type()))
 	}
-	*pExpr, err = PreprocessExpr(expr, start, end)
+	*pExpr, err = PreprocessExpr(expr, start, end, interval)

 	return qry, err
 }
@@ -1433,6 +1428,15 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
 			if params.Max() < 1 {
 				return nil, annos
 			}
+			if params.HasAnyNaN() {
+				ev.errorf("Parameter value is NaN")
+			}
+			if fParam := params.Min(); fParam <= minInt64 {
+				ev.errorf("Scalar value %v underflows int64", fParam)
+			}
+			if fParam := params.Max(); fParam >= maxInt64 {
+				ev.errorf("Scalar value %v overflows int64", fParam)
+			}
 			seriess = make(map[uint64]Series, len(inputMatrix))

 		case parser.LIMIT_RATIO:
@@ -1440,6 +1444,9 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
 			if params.Max() == 0 && params.Min() == 0 {
 				return nil, annos
 			}
+			if params.HasAnyNaN() {
+				ev.errorf("Ratio value is NaN")
+			}
 			if params.Max() > 1.0 {
 				annos.Add(annotations.NewInvalidRatioWarning(params.Max(), 1.0, aggExpr.Param.PositionRange()))
 			}
@@ -3325,9 +3332,6 @@ seriesLoop:
 	var r float64
 	switch op {
 	case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
-		if !convertibleToInt64(fParam) {
-			ev.errorf("Scalar value %v overflows int64", fParam)
-		}
 		k = int64(fParam)
 		if k > int64(len(inputMatrix)) {
 			k = int64(len(inputMatrix))
@@ -3339,9 +3343,6 @@ seriesLoop:
 			return nil, annos
 		}
 	case parser.LIMIT_RATIO:
-		if math.IsNaN(fParam) {
-			ev.errorf("Ratio value %v is NaN", fParam)
-		}
 		switch {
 		case fParam == 0:
 			if enh.Ts != ev.endTimestamp {
@@ -3730,10 +3731,10 @@ func unwrapStepInvariantExpr(e parser.Expr) parser.Expr {
 // PreprocessExpr wraps all possible step invariant parts of the given expression with
 // StepInvariantExpr. It also resolves the preprocessors and evaluates duration expressions
 // into their numeric values.
-func PreprocessExpr(expr parser.Expr, start, end time.Time) (parser.Expr, error) {
+func PreprocessExpr(expr parser.Expr, start, end time.Time, step time.Duration) (parser.Expr, error) {
 	detectHistogramStatsDecoding(expr)

-	if err := parser.Walk(&durationVisitor{}, expr, nil); err != nil {
+	if err := parser.Walk(&durationVisitor{step: step}, expr, nil); err != nil {
 		return nil, err
 	}
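With the extra parameter, callers thread the query resolution through preprocessing: NewRangeQuery passes its interval and instant queries pass 0, which is what step() resolves to. A minimal usage sketch against this branch, assuming the repository's module is importable and the experimental gate is on:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Duration expressions are gated behind this experimental flag.
	parser.ExperimentalDurationExpr = true

	expr, err := parser.ParseExpr(`rate(http_requests_total[max(step(), 1m)])`)
	if err != nil {
		panic(err)
	}

	start := time.Unix(0, 0)
	end := start.Add(time.Hour)
	// The 15s query step here is what step() evaluates to, so the
	// selector range becomes max(15s, 1m) = 1m.
	pre, err := promql.PreprocessExpr(expr, start, end, 15*time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Println(pre.String())
}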
@@ -3088,7 +3088,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
 		t.Run(test.input, func(t *testing.T) {
 			expr, err := parser.ParseExpr(test.input)
 			require.NoError(t, err)
-			expr, err = promql.PreprocessExpr(expr, startTime, endTime)
+			expr, err = promql.PreprocessExpr(expr, startTime, endTime, 0)
 			require.NoError(t, err)
 			if test.outputTest {
 				require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input)
@@ -144,32 +144,37 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	// (which is our guess for where the series actually starts or ends).

 	extrapolationThreshold := averageDurationBetweenSamples * 1.1
-	extrapolateToInterval := sampledInterval

 	if durationToStart >= extrapolationThreshold {
 		durationToStart = averageDurationBetweenSamples / 2
 	}
-	if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 {
+	if isCounter {
 		// Counters cannot be negative. If we have any slope at all
 		// (i.e. resultFloat went up), we can extrapolate the zero point
 		// of the counter. If the duration to the zero point is shorter
 		// than the durationToStart, we take the zero point as the start
 		// of the series, thereby avoiding extrapolation to negative
 		// counter values.
-		// TODO(beorn7): Do this for histograms, too.
-		durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat)
+		durationToZero := durationToStart
+		if resultFloat > 0 &&
+			len(samples.Floats) > 0 &&
+			samples.Floats[0].F >= 0 {
+			durationToZero = sampledInterval * (samples.Floats[0].F / resultFloat)
+		} else if resultHistogram != nil &&
+			resultHistogram.Count > 0 &&
+			len(samples.Histograms) > 0 &&
+			samples.Histograms[0].H.Count >= 0 {
+			durationToZero = sampledInterval * (samples.Histograms[0].H.Count / resultHistogram.Count)
+		}
 		if durationToZero < durationToStart {
 			durationToStart = durationToZero
 		}
 	}
-	extrapolateToInterval += durationToStart

 	if durationToEnd >= extrapolationThreshold {
 		durationToEnd = averageDurationBetweenSamples / 2
 	}
-	extrapolateToInterval += durationToEnd

-	factor := extrapolateToInterval / sampledInterval
+	factor := (sampledInterval + durationToStart + durationToEnd) / sampledInterval
 	if isRate {
 		factor /= ms.Range.Seconds()
 	}
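The rewritten factor makes the extrapolation arithmetic explicit: the measured increase over sampledInterval is scaled up to the sampled interval plus the (possibly clamped) head and tail gaps. A worked example with made-up numbers, as a small runnable sketch:

package main

import "fmt"

// Made-up scenario: 15 samples at t=2s..58s inside a 60s window, so the
// sampled interval is 56s, the average gap between samples is 4s, and the
// gaps to the window edges are 2s on each side.
func main() {
	sampledInterval := 56.0
	averageDurationBetweenSamples := 4.0
	durationToStart, durationToEnd := 2.0, 2.0
	rangeSeconds := 60.0

	// Gaps larger than ~1.1x the average gap are clamped to half a gap.
	threshold := averageDurationBetweenSamples * 1.1
	if durationToStart >= threshold {
		durationToStart = averageDurationBetweenSamples / 2
	}
	if durationToEnd >= threshold {
		durationToEnd = averageDurationBetweenSamples / 2
	}

	// increase() multiplies the raw delta by this factor; rate() then
	// divides by the range in seconds.
	factor := (sampledInterval + durationToStart + durationToEnd) / sampledInterval
	fmt.Println(factor)                // 60/56 ≈ 1.0714
	fmt.Println(factor / rangeSeconds) // per-second factor for rate()
}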
@@ -116,7 +116,8 @@ type DurationExpr struct {
 	LHS, RHS Expr // The operands on the respective sides of the operator.
 	Wrapped  bool // Set when the duration is wrapped in parentheses.

-	StartPos posrange.Pos // For unary operations, the position of the operator.
+	StartPos posrange.Pos // For unary operations and step(), the start position of the operator.
+	EndPos   posrange.Pos // For step(), the end position of the operator.
 }

 // Call represents a function call.
@@ -455,6 +456,18 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange {
 }

 func (e *DurationExpr) PositionRange() posrange.PositionRange {
+	if e.Op == STEP {
+		return posrange.PositionRange{
+			Start: e.StartPos,
+			End:   e.EndPos,
+		}
+	}
+	if e.RHS == nil {
+		return posrange.PositionRange{
+			Start: e.StartPos,
+			End:   e.RHS.PositionRange().End,
+		}
+	}
 	if e.LHS == nil {
 		return posrange.PositionRange{
 			Start: e.StartPos,
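Because step() has no operand to borrow a position from, both ends of its token span are recorded explicitly, which lets parse errors and annotations point at exactly the "step()" token. A quick check of the resulting range, as a sketch against this branch (it assumes RangeExpr is the expression-typed field the diff above reads):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	parser.ExperimentalDurationExpr = true
	expr, err := parser.ParseExpr(`foo[step()]`)
	if err != nil {
		panic(err)
	}
	ms := expr.(*parser.MatrixSelector)
	// Prints the position range covering "step()" (start 4, end 10),
	// matching the StartPos/EndPos pair recorded by the grammar actions.
	fmt.Println(ms.RangeExpr.PositionRange())
}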
@@ -150,6 +150,7 @@ WITHOUT
 %token <item>
 START
 END
+STEP
 %token preprocessorEnd

 // Counter reset hints.
@@ -174,7 +175,7 @@ START_METRIC_SELECTOR
 // Type definitions for grammar rules.
 %type <matchers> label_match_list
 %type <matcher> label_matcher
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint min_max
 %type <labels> label_set metric
 %type <lblList> label_set_list
 %type <label> label_set_item
@@ -478,7 +479,7 @@ offset_expr: expr OFFSET offset_duration_expr
             $$ = $1
         }
         | expr OFFSET error
-        { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
+        { yylex.(*parser).unexpected("offset", "number, duration, or step()"); $$ = $1 }
         ;

 /*
@@ -574,11 +575,11 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati
         | expr LEFT_BRACKET positive_duration_expr COLON positive_duration_expr error
         { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
         | expr LEFT_BRACKET positive_duration_expr COLON error
-        { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
+        { yylex.(*parser).unexpected("subquery selector", "number, duration, or step() or \"]\""); $$ = $1 }
         | expr LEFT_BRACKET positive_duration_expr error
         { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
         | expr LEFT_BRACKET error
-        { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
+        { yylex.(*parser).unexpected("subquery or range selector", "number, duration, or step()"); $$ = $1 }
         ;

 /*
@@ -695,7 +696,7 @@ metric : metric_identifier label_set
         ;

-metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO;
+metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO | STEP;

 label_set : LEFT_BRACE label_set_list RIGHT_BRACE
         { $$ = labels.New($2...) }
@@ -952,7 +953,7 @@ counter_reset_hint : UNKNOWN_COUNTER_RESET | COUNTER_RESET | NOT_COUNTER_RESET |
 aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;

 // Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
-maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO;
+maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO | STEP;

 unary_op : ADD | SUB;

@@ -1079,9 +1080,70 @@ offset_duration_expr : number_duration_literal
             nl.PosRange.Start = $1.Pos
             $$ = nl
         }
+        | STEP LEFT_PAREN RIGHT_PAREN
+        {
+            $$ = &DurationExpr{
+                Op:       STEP,
+                StartPos: $1.PositionRange().Start,
+                EndPos:   $3.PositionRange().End,
+            }
+        }
+        | unary_op STEP LEFT_PAREN RIGHT_PAREN
+        {
+            $$ = &DurationExpr{
+                Op: $1.Typ,
+                RHS: &DurationExpr{
+                    Op:       STEP,
+                    StartPos: $2.PositionRange().Start,
+                    EndPos:   $4.PositionRange().End,
+                },
+                StartPos: $1.Pos,
+            }
+        }
+        | min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN
+        {
+            $$ = &DurationExpr{
+                Op:       $1.Typ,
+                StartPos: $1.PositionRange().Start,
+                EndPos:   $6.PositionRange().End,
+                LHS:      $3.(Expr),
+                RHS:      $5.(Expr),
+            }
+        }
+        | unary_op min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN
+        {
+            $$ = &DurationExpr{
+                Op:       $1.Typ,
+                StartPos: $1.Pos,
+                EndPos:   $6.PositionRange().End,
+                RHS: &DurationExpr{
+                    Op:       $2.Typ,
+                    StartPos: $2.PositionRange().Start,
+                    EndPos:   $6.PositionRange().End,
+                    LHS:      $4.(Expr),
+                    RHS:      $6.(Expr),
+                },
+            }
+        }
+        | unary_op LEFT_PAREN duration_expr RIGHT_PAREN %prec MUL
+        {
+            de := $3.(*DurationExpr)
+            de.Wrapped = true
+            if $1.Typ == SUB {
+                $$ = &DurationExpr{
+                    Op:       SUB,
+                    RHS:      de,
+                    StartPos: $1.Pos,
+                }
+                break
+            }
+            $$ = $3
+        }
        | duration_expr
        ;

+min_max: MIN | MAX ;

 duration_expr : number_duration_literal
        {
            nl := $1.(*NumberLiteral)
@@ -1164,6 +1226,24 @@ duration_expr : number_duration_literal
            yylex.(*parser).experimentalDurationExpr($1.(Expr))
            $$ = &DurationExpr{Op: POW, LHS: $1.(Expr), RHS: $3.(Expr)}
        }
+        | STEP LEFT_PAREN RIGHT_PAREN
+        {
+            $$ = &DurationExpr{
+                Op:       STEP,
+                StartPos: $1.PositionRange().Start,
+                EndPos:   $3.PositionRange().End,
+            }
+        }
+        | min_max LEFT_PAREN duration_expr COMMA duration_expr RIGHT_PAREN
+        {
+            $$ = &DurationExpr{
+                Op:       $1.Typ,
+                StartPos: $1.PositionRange().Start,
+                EndPos:   $6.PositionRange().End,
+                LHS:      $3.(Expr),
+                RHS:      $5.(Expr),
+            }
+        }
        | paren_duration_expr
        ;

(File diff suppressed because it is too large.)
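The new productions make step(), min(...) and max(...) legal wherever a duration expression is accepted, in both offset and range/subquery positions. A short tour of the accepted syntax, as a sketch assuming parser.ExperimentalDurationExpr is enabled on this branch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	parser.ExperimentalDurationExpr = true
	for _, q := range []string{
		`foo[step()]`,                    // range equal to the query resolution
		`foo offset -step()`,             // unary minus applied to step()
		`foo[max(step(), 5m)]`,           // min_max rule: never select less than 5m
		`foo offset -min(5s,step()+8s)`,  // nested duration arithmetic
	} {
		if _, err := parser.ParseExpr(q); err != nil {
			fmt.Println(q, "->", err)
			continue
		}
		fmt.Println(q, "-> ok")
	}
}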
@@ -140,6 +140,7 @@ var key = map[string]ItemType{
 	// Preprocessors.
 	"start": START,
 	"end":   END,
+	"step":  STEP,
 }

 var histogramDesc = map[string]ItemType{
@@ -462,11 +463,20 @@ func lexStatements(l *Lexer) stateFn {
 			l.backup()
 			return lexKeywordOrIdentifier
 		}
+		switch r {
+		case ':':
 			if l.gotColon {
 				return l.errorf("unexpected colon %q", r)
 			}
 			l.emit(COLON)
 			l.gotColon = true
+			return lexStatements
+		case 's', 'S', 'm', 'M':
+			if l.scanDurationKeyword() {
+				return lexStatements
+			}
+		}
+		return l.errorf("unexpected character: %q, expected %q", r, ':')
 	case r == '(':
 		l.emit(LEFT_PAREN)
 		l.parenDepth++
@@ -889,6 +899,32 @@ func lexNumber(l *Lexer) stateFn {
 	return lexStatements
 }

+func (l *Lexer) scanDurationKeyword() bool {
+	for {
+		switch r := l.next(); {
+		case isAlpha(r):
+			// absorb.
+		default:
+			l.backup()
+			word := l.input[l.start:l.pos]
+			kw := strings.ToLower(word)
+			switch kw {
+			case "step":
+				l.emit(STEP)
+				return true
+			case "min":
+				l.emit(MIN)
+				return true
+			case "max":
+				l.emit(MAX)
+				return true
+			default:
+				return false
+			}
+		}
+	}
+}

 // lexNumberOrDuration scans a number or a duration Item.
 func lexNumberOrDuration(l *Lexer) stateFn {
 	if l.scanNumber() {
@@ -1133,6 +1169,14 @@ func lexDurationExpr(l *Lexer) stateFn {
 	case r == '^':
 		l.emit(POW)
 		return lexDurationExpr
+	case r == ',':
+		l.emit(COMMA)
+		return lexDurationExpr
+	case r == 's' || r == 'S' || r == 'm' || r == 'M':
+		if l.scanDurationKeyword() {
+			return lexDurationExpr
+		}
+		return l.errorf("unexpected character in duration expression: %q", r)
 	case isDigit(r) || (r == '.' && isDigit(l.peek())):
 		l.backup()
 		l.gotDuration = true
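The lexer only needs to attempt keyword scanning on 's', 'S', 'm' or 'M', the initial letters of step, min and max; any other alphabetic run falls through to the error path. A self-contained sketch of the absorb-then-classify loop, with illustrative names rather than the real Lexer API:

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// scanKeyword absorbs the alphabetic run starting at pos and classifies it
// case-insensitively, mirroring scanDurationKeyword above. Hypothetical
// helper for illustration; the real lexer emits items instead of strings.
func scanKeyword(input string, pos int) (string, bool) {
	start := pos
	for pos < len(input) && unicode.IsLetter(rune(input[pos])) {
		pos++
	}
	switch kw := strings.ToLower(input[start:pos]); kw {
	case "step", "min", "max":
		return kw, true
	default:
		return "", false
	}
}

func main() {
	fmt.Println(scanKeyword("MAX(step(),5s)", 0)) // max true
	fmt.Println(scanKeyword("sum(foo)", 0))       // "" false
}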
@@ -614,6 +614,43 @@ var testExpr = []struct {
 		fail:   true,
 		errMsg: "1:11: parse error: unexpected <ignoring>",
 	},
+	// Vector selectors.
+	{
+		input: `offset{step="1s"}[5m]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "offset",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, "step", "1s"),
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "offset"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   17,
+				},
+			},
+			Range:  5 * time.Minute,
+			EndPos: 21,
+		},
+	},
+	{
+		input: `step{offset="1s"}[5m]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "step",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, "offset", "1s"),
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "step"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   17,
+				},
+			},
+			Range:  5 * time.Minute,
+			EndPos: 21,
+		},
+	},
 	// Vector binary operations.
 	{
 		input: "foo * bar",
@@ -2388,7 +2425,7 @@ var testExpr = []struct {
 	{
 		input:  `foo[]`,
 		fail:   true,
-		errMsg: "unexpected \"]\" in subquery selector, expected number or duration",
+		errMsg: "unexpected \"]\" in subquery or range selector, expected number, duration, or step()",
 	},
 	{
 		input: `foo[-1]`,
@@ -2403,7 +2440,7 @@ var testExpr = []struct {
 	{
 		input:  `some_metric[5m] OFFSET`,
 		fail:   true,
-		errMsg: "unexpected end of input in offset, expected number or duration",
+		errMsg: "1:23: parse error: unexpected end of input in offset, expected number, duration, or step()",
 	},
 	{
 		input: `some_metric OFFSET 1m[5m]`,
@@ -4131,6 +4168,242 @@ var testExpr = []struct {
 			EndPos: 13,
 		},
 	},
+	{
+		input: `foo[step()]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "foo",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   3,
+				},
+			},
+			RangeExpr: &DurationExpr{
+				Op:       STEP,
+				StartPos: 4,
+				EndPos:   10,
+			},
+			EndPos: 11,
+		},
+	},
+	{
+		input: `foo[  -  step  (  )  ]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "foo",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   3,
+				},
+			},
+			RangeExpr: &DurationExpr{
+				Op:       SUB,
+				StartPos: 6,
+				RHS: &DurationExpr{
+					Op:       STEP,
+					StartPos: 9,
+					EndPos:   19,
+				},
+			},
+			EndPos: 22,
+		},
+	},
+	{
+		input: `foo[   step  (  )  ]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "foo",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   3,
+				},
+			},
+			RangeExpr: &DurationExpr{
+				Op:       STEP,
+				StartPos: 7,
+				EndPos:   17,
+			},
+			EndPos: 20,
+		},
+	},
+	{
+		input: `foo[-step()]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "foo",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   3,
+				},
+			},
+			RangeExpr: &DurationExpr{
+				Op:       SUB,
+				StartPos: 4,
+				RHS:      &DurationExpr{Op: STEP, StartPos: 5, EndPos: 11},
+			},
+			EndPos: 12,
+		},
+	},
+	{
+		input: `foo offset step()`,
+		expected: &VectorSelector{
+			Name: "foo",
+			LabelMatchers: []*labels.Matcher{
+				MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+			},
+			PosRange: posrange.PositionRange{
+				Start: 0,
+				End:   17,
+			},
+			OriginalOffsetExpr: &DurationExpr{
+				Op:       STEP,
+				StartPos: 11,
+				EndPos:   17,
+			},
+		},
+	},
+	{
+		input: `foo offset -step()`,
+		expected: &VectorSelector{
+			Name: "foo",
+			LabelMatchers: []*labels.Matcher{
+				MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+			},
+			PosRange: posrange.PositionRange{
+				Start: 0,
+				End:   18,
+			},
+			OriginalOffsetExpr: &DurationExpr{
+				Op:       SUB,
+				StartPos: 11,
+				RHS:      &DurationExpr{Op: STEP, StartPos: 12, EndPos: 18},
+			},
+		},
+	},
+	{
+		input: `foo[max(step(),5s)]`,
+		expected: &MatrixSelector{
+			VectorSelector: &VectorSelector{
+				Name: "foo",
+				LabelMatchers: []*labels.Matcher{
+					MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+				},
+				PosRange: posrange.PositionRange{
+					Start: 0,
+					End:   3,
+				},
+			},
+			RangeExpr: &DurationExpr{
+				Op: MAX,
+				LHS: &DurationExpr{
+					Op:       STEP,
+					StartPos: 8,
+					EndPos:   14,
+				},
+				RHS: &NumberLiteral{
+					Val:      5,
+					Duration: true,
+					PosRange: posrange.PositionRange{
+						Start: 15,
+						End:   17,
+					},
+				},
+				StartPos: 4,
+				EndPos:   18,
+			},
+			EndPos: 19,
+		},
+	},
+	{
+		input: `foo offset max(step(),5s)`,
+		expected: &VectorSelector{
+			Name: "foo",
+			LabelMatchers: []*labels.Matcher{
+				MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+			},
+			PosRange: posrange.PositionRange{
+				Start: 0,
+				End:   25,
+			},
+			OriginalOffsetExpr: &DurationExpr{
+				Op: MAX,
+				LHS: &DurationExpr{
+					Op:       STEP,
+					StartPos: 15,
+					EndPos:   21,
+				},
+				RHS: &NumberLiteral{
+					Val:      5,
+					Duration: true,
+					PosRange: posrange.PositionRange{
+						Start: 22,
+						End:   24,
+					},
+				},
+				StartPos: 11,
+				EndPos:   25,
+			},
+		},
+	},
+	{
+		input: `foo offset -min(5s,step()+8s)`,
+		expected: &VectorSelector{
+			Name: "foo",
+			LabelMatchers: []*labels.Matcher{
+				MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
+			},
+			PosRange: posrange.PositionRange{
+				Start: 0,
+				End:   29,
+			},
+			OriginalOffsetExpr: &DurationExpr{
+				Op: SUB,
+				RHS: &DurationExpr{
+					Op: MIN,
+					LHS: &NumberLiteral{
+						Val:      5,
+						Duration: true,
+						PosRange: posrange.PositionRange{
+							Start: 16,
+							End:   18,
+						},
+					},
+					RHS: &DurationExpr{
+						Op: ADD,
+						LHS: &DurationExpr{
+							Op:       STEP,
+							StartPos: 19,
+							EndPos:   25,
+						},
+						RHS: &NumberLiteral{
+							Val:      8,
+							Duration: true,
+							PosRange: posrange.PositionRange{
+								Start: 26,
+								End:   28,
+							},
+						},
+					},
+					StartPos: 12,
+					EndPos:   28,
+				},
+				StartPos: 11,
+				EndPos:   28,
+			},
+		},
+	},
 	{
 		input: `foo[4s+4s:1s*2] offset (5s-8)`,
 		expected: &SubqueryExpr{
@@ -4453,6 +4726,16 @@ var testExpr = []struct {
 			EndPos: 11,
 		},
 	},
+	{
+		input:  `foo[step]`,
+		fail:   true,
+		errMsg: `1:9: parse error: unexpected "]" in subquery or range selector, expected number, duration, or step()`,
+	},
+	{
+		input:  `foo[step()/0d]`,
+		fail:   true,
+		errMsg: `division by zero`,
+	},
 	{
 		input: `foo[5s/0d]`,
 		fail:  true,
@@ -4545,6 +4828,16 @@ var testExpr = []struct {
 		fail:   true,
 		errMsg: "unclosed left parenthesis",
 	},
+	{
+		input:  "foo[5s x 5s]",
+		fail:   true,
+		errMsg: "unexpected character: 'x', expected ':'",
+	},
+	{
+		input:  "foo[5s s 5s]",
+		fail:   true,
+		errMsg: "unexpected character: 's', expected ':'",
+	},
 }

 func makeInt64Pointer(val int64) *int64 {
@@ -84,8 +84,8 @@ func (e *DurationExpr) Pretty(int) string {
 	fmt.Println("e.LHS", e.LHS)
 	fmt.Println("e.RHS", e.RHS)
 	if e.LHS == nil {
-		// This is a unary negative duration expression.
-		s = fmt.Sprintf("%s %s", e.Op, e.RHS.Pretty(0))
+		// This is a unary duration expression.
+		s = fmt.Sprintf("%s%s", e.Op, e.RHS.Pretty(0))
 	} else {
 		s = fmt.Sprintf("%s %s %s", e.LHS.Pretty(0), e.Op, e.RHS.Pretty(0))
 	}
@@ -148,10 +148,25 @@ func (node *BinaryExpr) getMatchingStr() string {

 func (node *DurationExpr) String() string {
 	var expr string
-	if node.LHS == nil {
-		// This is a unary negative duration expression.
+	switch {
+	case node.Op == STEP:
+		expr = "step()"
+	case node.Op == MIN:
+		expr = fmt.Sprintf("min(%s, %s)", node.LHS, node.RHS)
+	case node.Op == MAX:
+		expr = fmt.Sprintf("max(%s, %s)", node.LHS, node.RHS)
+	case node.LHS == nil:
+		// This is a unary duration expression.
+		switch node.Op {
+		case SUB:
 			expr = fmt.Sprintf("%s%s", node.Op, node.RHS)
-	} else {
+		case ADD:
+			expr = node.RHS.String()
+		default:
+			// This should never happen.
+			panic(fmt.Sprintf("unexpected unary duration expression: %s", node.Op))
+		}
+	default:
 		expr = fmt.Sprintf("%s %s %s", node.LHS, node.Op, node.RHS)
 	}
 	if node.Wrapped {
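These String() rules normalize duration expressions on round-trip: a redundant unary plus is dropped, unary minus is kept, and STEP/MIN/MAX print in function form. A small round-trip sketch, using inputs taken from the test data below and assuming this branch of the parser:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	parser.ExperimentalDurationExpr = true
	for _, in := range []string{
		"foo offset +min(10s, 20s)",      // prints without the redundant +
		"foo offset -step()",             // unary minus is preserved
		"foo[200-min(-step()^+step(),1)]", // operators get canonical spacing
	} {
		expr, err := parser.ParseExpr(in)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s => %s\n", in, expr.String())
	}
}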
@@ -22,6 +22,10 @@ import (
 )

 func TestExprString(t *testing.T) {
+	ExperimentalDurationExpr = true
+	t.Cleanup(func() {
+		ExperimentalDurationExpr = false
+	})
 	// A list of valid expressions that are expected to be
 	// returned as out when calling String(). If out is empty the output
 	// is expected to equal the input.
@@ -167,9 +171,56 @@ func TestExprString(t *testing.T) {
 		{
 			in:  "1048576",
 			out: "1048576",
 		},
+		{
+			in: "foo[step()]",
+		},
+		{
+			in: "foo[-step()]",
+		},
+		{
+			in: "foo[(step())]",
+		},
+		{
+			in: "foo[-(step())]",
+		},
+		{
+			in: "foo offset step()",
+		},
+		{
+			in: "foo offset -step()",
+		},
+		{
+			in: "foo offset (step())",
+		},
+		{
+			in: "foo offset -(step())",
+		},
+		{
+			in:  "foo offset +(5*2)",
+			out: "foo offset (5 * 2)",
+		},
+		{
+			in:  "foo offset +min(10s, 20s)",
+			out: "foo offset min(10s, 20s)",
+		},
+		{
+			in: "foo offset -min(10s, 20s)",
+		},
+		{
+			in:  "foo offset -min(10s, +max(step() ^ 2, 2))",
+			out: "foo offset -min(10s, max(step() ^ 2, 2))",
+		},
+		{
+			in:  "foo[200-min(-step()^+step(),1)]",
+			out: "foo[200 - min(-step() ^ step(), 1)]",
+		},
+		{
+			in: "foo[200 - min(step() + 10s, -max(step() ^ 2, 3))]",
+		},
 	}

 	for _, test := range inputs {
+		t.Run(test.in, func(t *testing.T) {
 			expr, err := ParseExpr(test.in)
 			require.NoError(t, err)

@@ -179,6 +230,7 @@ func TestExprString(t *testing.T) {
 			}

 			require.Equal(t, exp, expr.String())
+		})
 	}
 }
@@ -232,30 +232,38 @@ load 5m
 	http_requests_histogram{job="api-server", instance="3", group="canary"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}

 eval instant at 0m max(http_requests)
+	expect no_info
 	{} 4

 # The histogram is ignored here so the result doesn't change but it has an info annotation now.
-eval_info instant at 0m max({job="api-server"})
+eval instant at 0m max({job="api-server"})
+	expect info
 	{} 4

 # The histogram is ignored here so there is no result but it has an info annotation now.
-eval_info instant at 0m max(http_requests_histogram)
+eval instant at 0m max(http_requests_histogram)
+	expect info

 eval instant at 0m min(http_requests)
+	expect no_info
 	{} 1

 # The histogram is ignored here so the result doesn't change but it has an info annotation now.
-eval_info instant at 0m min({job="api-server"})
+eval instant at 0m min({job="api-server"})
+	expect info
 	{} 1

 # The histogram is ignored here so there is no result but it has an info annotation now.
-eval_info instant at 0m min(http_requests_histogram)
+eval instant at 0m min(http_requests_histogram)
+	expect info

 eval instant at 0m max by (group) (http_requests)
+	expect no_info
 	{group="production"} 2
 	{group="canary"} 4

 eval instant at 0m min by (group) (http_requests)
+	expect no_info
 	{group="production"} 1
 	{group="canary"} 3

@@ -276,26 +284,31 @@ load 5m
 	http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11
 	foo 1+1x9 3

-eval_ordered instant at 50m topk(3, http_requests)
+eval instant at 50m topk(3, http_requests)
+	expect ordered
 	http_requests{group="canary", instance="1", job="app-server"} 800
 	http_requests{group="canary", instance="0", job="app-server"} 700
 	http_requests{group="production", instance="1", job="app-server"} 600

-eval_ordered instant at 50m topk((3), (http_requests))
+eval instant at 50m topk((3), (http_requests))
+	expect ordered
 	http_requests{group="canary", instance="1", job="app-server"} 800
 	http_requests{group="canary", instance="0", job="app-server"} 700
 	http_requests{group="production", instance="1", job="app-server"} 600

-eval_ordered instant at 50m topk(5, http_requests{group="canary",job="app-server"})
+eval instant at 50m topk(5, http_requests{group="canary",job="app-server"})
+	expect ordered
 	http_requests{group="canary", instance="1", job="app-server"} 800
 	http_requests{group="canary", instance="0", job="app-server"} 700

-eval_ordered instant at 50m bottomk(3, http_requests)
+eval instant at 50m bottomk(3, http_requests)
+	expect ordered
 	http_requests{group="production", instance="0", job="api-server"} 100
 	http_requests{group="production", instance="1", job="api-server"} 200
 	http_requests{group="canary", instance="0", job="api-server"} 300

-eval_ordered instant at 50m bottomk(5, http_requests{group="canary",job="app-server"})
+eval instant at 50m bottomk(5, http_requests{group="canary",job="app-server"})
+	expect ordered
 	http_requests{group="canary", instance="0", job="app-server"} 700
 	http_requests{group="canary", instance="1", job="app-server"} 800

@@ -309,33 +322,39 @@ eval instant at 50m bottomk by (group) (2, http_requests)
 	http_requests{group="production", instance="0", job="api-server"} 100
 	http_requests{group="production", instance="1", job="api-server"} 200

-eval_ordered instant at 50m bottomk by (group) (2, http_requests{group="production"})
+eval instant at 50m bottomk by (group) (2, http_requests{group="production"})
+	expect ordered
 	http_requests{group="production", instance="0", job="api-server"} 100
 	http_requests{group="production", instance="1", job="api-server"} 200

 # Test NaN is sorted away from the top/bottom.
-eval_ordered instant at 50m topk(3, http_requests{job="api-server",group="production"})
+eval instant at 50m topk(3, http_requests{job="api-server",group="production"})
+	expect ordered
 	http_requests{job="api-server", instance="1", group="production"} 200
 	http_requests{job="api-server", instance="0", group="production"} 100
 	http_requests{job="api-server", instance="2", group="production"} NaN

-eval_ordered instant at 50m bottomk(3, http_requests{job="api-server",group="production"})
+eval instant at 50m bottomk(3, http_requests{job="api-server",group="production"})
+	expect ordered
 	http_requests{job="api-server", instance="0", group="production"} 100
 	http_requests{job="api-server", instance="1", group="production"} 200
 	http_requests{job="api-server", instance="2", group="production"} NaN

 # Test topk and bottomk allocate min(k, input_vector) for results vector
-eval_ordered instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"})
+eval instant at 50m bottomk(9999999999, http_requests{job="app-server",group="canary"})
+	expect ordered
 	http_requests{group="canary", instance="0", job="app-server"} 700
 	http_requests{group="canary", instance="1", job="app-server"} 800

-eval_ordered instant at 50m topk(9999999999, http_requests{job="api-server",group="production"})
+eval instant at 50m topk(9999999999, http_requests{job="api-server",group="production"})
+	expect ordered
 	http_requests{job="api-server", instance="1", group="production"} 200
 	http_requests{job="api-server", instance="0", group="production"} 100
 	http_requests{job="api-server", instance="2", group="production"} NaN

 # Bug #5276.
-eval_ordered instant at 50m topk(scalar(foo), http_requests)
+eval instant at 50m topk(scalar(foo), http_requests)
+	expect ordered
 	http_requests{group="canary", instance="1", job="app-server"} 800
 	http_requests{group="canary", instance="0", job="app-server"} 700
 	http_requests{group="production", instance="1", job="app-server"} 600

@@ -348,46 +367,67 @@ eval range from 0m to 50m step 5m count(bottomk(scalar(foo), http_requests))
 	{} 1 2 3 4 5 6 7 8 9 9 3

 # Tests for histogram: should ignore histograms.
-eval_info instant at 50m topk(100, http_requests_histogram)
+eval instant at 50m topk(100, http_requests_histogram)
+	expect info
 	#empty

-eval_info range from 0 to 50m step 5m topk(100, http_requests_histogram)
+eval range from 0 to 50m step 5m topk(100, http_requests_histogram)
+	expect info
 	#empty

-eval_info instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"})
+eval instant at 50m topk(1, {__name__=~"http_requests(_histogram)?"})
+	expect info
 	{__name__="http_requests", group="canary", instance="1", job="app-server"} 800

-eval_info instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"}))
+eval instant at 50m count(topk(1000, {__name__=~"http_requests(_histogram)?"}))
+	expect info
 	{} 9

-eval_info range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"}))
+eval range from 0 to 50m step 5m count(topk(1000, {__name__=~"http_requests(_histogram)?"}))
+	expect info
 	{} 9x10

-eval_info instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"})
+eval instant at 50m topk by (instance) (1, {__name__=~"http_requests(_histogram)?"})
+	expect info
 	{__name__="http_requests", group="canary", instance="0", job="app-server"} 700
 	{__name__="http_requests", group="canary", instance="1", job="app-server"} 800
 	{__name__="http_requests", group="production", instance="2", job="api-server"} NaN

-eval_info instant at 50m bottomk(100, http_requests_histogram)
+eval instant at 50m bottomk(100, http_requests_histogram)
+	expect info
 	#empty

-eval_info range from 0 to 50m step 5m bottomk(100, http_requests_histogram)
+eval range from 0 to 50m step 5m bottomk(100, http_requests_histogram)
+	expect info
 	#empty

-eval_info instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"})
+eval instant at 50m bottomk(1, {__name__=~"http_requests(_histogram)?"})
+	expect info
 	{__name__="http_requests", group="production", instance="0", job="api-server"} 100

-eval_info instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"}))
+eval instant at 50m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"}))
+	expect info
 	{} 9

-eval_info range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"}))
+eval range from 0 to 50m step 5m count(bottomk(1000, {__name__=~"http_requests(_histogram)?"}))
+	expect info
 	{} 9x10

-eval_info instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"})
+eval instant at 50m bottomk by (instance) (1, {__name__=~"http_requests(_histogram)?"})
+	expect info
 	{__name__="http_requests", group="production", instance="0", job="api-server"} 100
 	{__name__="http_requests", group="production", instance="1", job="api-server"} 200
 	{__name__="http_requests", group="production", instance="2", job="api-server"} NaN

+eval instant at 50m topk(NaN, non_existent)
+	expect fail msg: Parameter value is NaN
+
+eval instant at 50m limitk(NaN, non_existent)
+	expect fail msg: Parameter value is NaN
+
+eval instant at 50m limit_ratio(NaN, non_existent)
+	expect fail msg: Ratio value is NaN
+
 clear

 # Tests for count_values.
@@ -438,8 +478,8 @@ eval instant at 1m count_values by (job, group)("job", version)
 	{job="{count:20, sum:10, [-2,-1):2, [-1,-0.5):1, [-0.001,0.001]:2, (0.5,1]:1, (1,2]:2}", group="canary"} 2

 # Test an invalid label value.
-eval_fail instant at 0 count_values("a\xc5z", version)
-	expected_fail_message invalid label name "a\xc5z"
+eval instant at 0 count_values("a\xc5z", version)
+	expect fail msg:invalid label name "a\xc5z"

 # Tests for quantile.
 clear
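The pattern in this test file is mechanical: the old eval_info, eval_ordered and eval_fail commands become plain eval followed by an indented expect line (expect info, expect no_info, expect ordered, or expect fail msg: <message>). For example, an old-style case and its replacement, in the file's own script format:

Before:

eval_info instant at 50m topk(100, http_requests_histogram)
	#empty

After:

eval instant at 50m topk(100, http_requests_histogram)
	expect info
	#empty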
@ -453,46 +493,67 @@ load 10s
|
||||||
data{test="uneven samples",point="a"} 0
|
data{test="uneven samples",point="a"} 0
|
||||||
data{test="uneven samples",point="b"} 1
|
data{test="uneven samples",point="b"} 1
|
||||||
data{test="uneven samples",point="c"} 4
|
data{test="uneven samples",point="c"} 4
|
||||||
|
data{test="NaN sample",point="a"} 0
|
||||||
|
data{test="NaN sample",point="b"} 1
|
||||||
|
data{test="NaN sample",point="c"} NaN
|
||||||
data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}
|
data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}
|
||||||
foo 0 1 0 1 0 1 0.8
|
foo 0 1 0 1 0 1 0.8
|
||||||
|
|
||||||
|
# 80th percentile.
|
||||||
|
# The NaN sample is treated as the smallest possible value.
|
||||||
eval instant at 1m quantile without(point)(0.8, data)
|
eval instant at 1m quantile without(point)(0.8, data)
|
||||||
|
expect no_info
|
||||||
{test="two samples"} 0.8
|
{test="two samples"} 0.8
|
||||||
{test="three samples"} 1.6
|
{test="three samples"} 1.6
|
||||||
{test="uneven samples"} 2.8
|
{test="uneven samples"} 2.8
|
||||||
|
{test="NaN sample"} 0.6
|
||||||
|
|
||||||
|
# 20th percentile.
|
||||||
|
# A quantile between NaN and 0 is interpolated as NaN.
|
||||||
|
eval instant at 1m quantile without(point)(0.2, data)
|
||||||
|
{test="two samples"} 0.2
|
||||||
|
{test="three samples"} 0.4
|
||||||
|
{test="uneven samples"} 0.4
|
||||||
|
{test="NaN sample"} NaN
|
||||||
|
|
||||||
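# Worked example for the "NaN sample" series {0, 1, NaN} above: with NaN sorted
# first, the 0.8 quantile interpolates at rank 0.8*(3-1)=1.6, i.e. 60% of the
# way from 0 to 1, giving 0.6; the 0.2 quantile (rank 0.4) falls between NaN
# and 0, and interpolating with NaN yields NaN.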
 # The histogram is ignored here so the result doesn't change but it has an info annotation now.
-eval_info instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"})
+eval instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"})
+    expect info
     {test="two samples"} 0.8
     {test="three samples"} 1.6
     {test="uneven samples"} 2.8
+    {test="NaN sample"} 0.6

 # The histogram is ignored here so there is no result but it has an info annotation now.
-eval_info instant at 1m quantile(0.8, data_histogram)
+eval instant at 1m quantile(0.8, data_histogram)
+    expect info

 # Bug #5276.
 eval instant at 1m quantile without(point)(scalar(foo), data)
     {test="two samples"} 0.8
     {test="three samples"} 1.6
     {test="uneven samples"} 2.8
+    {test="NaN sample"} 0.6

 eval instant at 1m quantile without(point)((scalar(foo)), data)
     {test="two samples"} 0.8
     {test="three samples"} 1.6
     {test="uneven samples"} 2.8
+    {test="NaN sample"} 0.6

 eval instant at 1m quantile without(point)(NaN, data)
     expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN
     {test="two samples"} NaN
     {test="three samples"} NaN
     {test="uneven samples"} NaN
+    {test="NaN sample"} NaN

 # Bug #15971.
 eval range from 0m to 1m step 10s quantile without(point) (scalar(foo), data)
     {test="two samples"} 0 1 0 1 0 1 0.8
     {test="three samples"} 0 2 0 2 0 2 1.6
     {test="uneven samples"} 0 4 0 4 0 4 2.8
+    {test="NaN sample"} NaN 1 NaN 1 NaN 1 0.6

 # Tests for group.
 clear

@@ -736,22 +797,28 @@ load 5m
     series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}

 # The histogram is ignored here so the result doesn't change but it has an info annotation now.
-eval_info instant at 0m stddev(series)
+eval instant at 0m stddev(series)
+    expect info
     {} 0.5

-eval_info instant at 0m stdvar(series)
+eval instant at 0m stdvar(series)
+    expect info
     {} 0.25

 # The histogram is ignored here so there is no result but it has an info annotation now.
-eval_info instant at 0m stddev({label="c"})
+eval instant at 0m stddev({label="c"})
+    expect info

-eval_info instant at 0m stdvar({label="c"})
+eval instant at 0m stdvar({label="c"})
+    expect info

-eval_info instant at 0m stddev by (label) (series)
+eval instant at 0m stddev by (label) (series)
+    expect info
     {label="a"} 0
     {label="b"} 0

-eval_info instant at 0m stdvar by (label) (series)
+eval instant at 0m stdvar by (label) (series)
+    expect info
     {label="a"} 0
     {label="b"} 0

@@ -762,17 +829,21 @@ load 5m
     series{label="b"} 1
     series{label="c"} 2

-eval_info instant at 0m stddev(series)
+eval instant at 0m stddev(series)
+    expect info
     {} 0.5

-eval_info instant at 0m stdvar(series)
+eval instant at 0m stdvar(series)
+    expect info
     {} 0.25

-eval_info instant at 0m stddev by (label) (series)
+eval instant at 0m stddev by (label) (series)
+    expect info
     {label="b"} 0
     {label="c"} 0

-eval_info instant at 0m stdvar by (label) (series)
+eval instant at 0m stdvar by (label) (series)
+    expect info
     {label="b"} 0
     {label="c"} 0


@@ -90,8 +90,7 @@ eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100)
 eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100
     {job="1"} 15

-# Note that this triggers an info annotation because we are rate'ing a metric that does not end in `_total`.
-eval_info instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "")
+eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metric{job="2"}[123s] @ 200), "job", "1", "", "")
     {job="1"} 0.3

 eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "")

@@ -17,6 +17,7 @@ load 5m
     testmetric1{src="a",dst="b"} 0
     testmetric2{src="a",dst="b"} 1

-eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
+eval instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
+    expect fail

 clear

@@ -146,3 +146,83 @@ eval instant at 1000s metric1_total offset -4

 eval instant at 1000s metric1_total offset (-2 ^ 2)
     metric1_total{} 100

+clear
+
+load 1s
+    metric1_total 0+1x100
+
+eval range from 50s to 60s step 10s count_over_time(metric1_total[step()])
+    {} 10 10
+
+eval range from 50s to 60s step 10s count_over_time(metric1_total[step()+1ms])
+    {} 11 11
+
+eval range from 50s to 60s step 10s count_over_time(metric1_total[(step())+1])
+    {} 11 11
+
+eval range from 50s to 60s step 10s count_over_time(metric1_total[1+(STep()-5)*2])
+    {} 11 11
+
+eval range from 50s to 60s step 5s count_over_time(metric1_total[step()+1])
+    {} 6 6 6
+
+eval range from 50s to 60s step 5s count_over_time(metric1_total[min(step()+1,1h)])
+    {} 6 6 6
+
+eval range from 50s to 60s step 5s count_over_time(metric1_total[max(min(step()+1,1h),1ms)])
+    {} 6 6 6
+
+eval range from 50s to 60s step 5s count_over_time(metric1_total[((max(min((step()+1),((1h))),1ms)))])
+    {} 6 6 6
+
+eval range from 50s to 60s step 5s metric1_total offset STEP()
+    metric1_total{} 45 50 55
+
+eval range from 50s to 60s step 5s metric1_total offset step()
+    metric1_total{} 45 50 55
+
+eval range from 50s to 60s step 5s metric1_total offset step()*0
+    {} 0 0 0
+
+eval range from 50s to 60s step 5s metric1_total offset (-step()*2)
+    metric1_total{} 60 65 70
+
+eval range from 50s to 60s step 5s metric1_total offset -step()*2
+    {} 110 120 130
+
+eval range from 50s to 60s step 5s metric1_total offset step()^0
+    {} 1 1 1
+
+eval range from 50s to 60s step 5s metric1_total offset (STEP()/10)
+    metric1_total{} 49 54 59
+
+eval range from 50s to 60s step 5s metric1_total offset (step())
+    metric1_total{} 45 50 55
+
+eval range from 50s to 60s step 5s metric1_total offset min(step(), 1s)
+    metric1_total{} 49 54 59
+
+eval range from 50s to 60s step 5s metric1_total offset min(step(), 1s)+8000
+    {} 8049 8054 8059
+
+eval range from 50s to 60s step 5s metric1_total offset -min(step(), 1s)+8000
+    {} 8051 8056 8061
+
+eval range from 50s to 60s step 5s metric1_total offset -(min(step(), 1s))+8000
+    {} 8051 8056 8061
+
+eval range from 50s to 60s step 5s metric1_total offset -min(step(), 1s)^0
+    {} 1 1 1
+
+eval range from 50s to 60s step 5s metric1_total offset +min(step(), 1s)^0
+    {} 1 1 1
+
+eval range from 50s to 60s step 5s metric1_total offset min(step(), 1s)^0
+    {} 1 1 1
+
+eval range from 50s to 60s step 5s metric1_total offset max(3s,min(step(), 1s))+8000
+    {} 8047 8052 8057
+
+eval range from 50s to 60s step 5s metric1_total offset -(min(step(), 2s)-5)+8000
+    {} 8047 8052 8057
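# In the additions above, step() resolves to the step of the surrounding range
# query (10s or 5s here), so with 1s scrape intervals metric1_total[step()]
# covers exactly one step's worth of samples, and `offset step()` shifts the
# evaluation time back by one step. Duration expressions can combine step()
# with arithmetic as well as min() and max().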

@@ -228,37 +228,48 @@ load 5m
     http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:3 count:3 custom_values:[1] buckets:[3]}} {{schema:-53 sum:3 count:3 custom_values:[5 10] buckets:[3]}}

 eval instant at 50m irate(http_requests_total[50m])
+    expect no_warn
     {path="/foo"} .03333333333333333333
     {path="/bar"} .03333333333333333333

 # Counter reset.
 eval instant at 30m irate(http_requests_total[50m])
+    expect no_warn
     {path="/foo"} .03333333333333333333
     {path="/bar"} 0

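# irate only considers the last two samples of the range; at 30m the /bar
# series has just reset, so its instantaneous rate is 0.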
 eval range from 0 to 20m step 5m irate(http_requests_nan[15m1s])
+    expect no_warn
     {} _ NaN NaN NaN 0.02

 eval instant at 20m irate(http_requests_histogram{path="/a"}[20m])
+    expect no_warn
     {path="/a"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}}

 eval instant at 20m irate(http_requests_histogram{path="/b"}[20m])
+    expect no_warn
     {path="/b"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}}

 eval instant at 20m irate(http_requests_histogram{path="/b"}[6m])
+    expect no_warn

-eval_warn instant at 20m irate(http_requests_histogram{path="/c"}[20m])
+eval instant at 20m irate(http_requests_histogram{path="/c"}[20m])
+    expect warn
     {path="/c"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}}

-eval_warn instant at 20m irate(http_requests_histogram{path="/d"}[20m])
+eval instant at 20m irate(http_requests_histogram{path="/d"}[20m])
+    expect warn
     {path="/d"} {{sum:0.01 count:0.01 counter_reset_hint:gauge}}

-eval_warn instant at 20m irate(http_requests_histogram{path="/e"}[20m])
+eval instant at 20m irate(http_requests_histogram{path="/e"}[20m])
+    expect warn

 eval instant at 20m irate(http_requests_histogram{path="/f"}[20m])
+    expect no_warn
     {path="/f"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}}

 eval instant at 20m irate(http_requests_histogram{path="/g"}[20m])
+    expect no_warn
     {path="/g"} {{schema:-53 sum:0.01 count:0.01 custom_values:[5 10] buckets:[0.01]}}

 clear

@@ -272,18 +283,22 @@ load 5m
     http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}}

 eval instant at 20m delta(http_requests[20m])
+    expect no_warn
     {path="/foo"} 200
     {path="/bar"} -200

 eval instant at 20m delta(http_requests_gauge[20m])
+    expect no_warn
     {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}}

 # delta emits warn annotation for non-gauge histogram types.
-eval_warn instant at 20m delta(http_requests_counter[20m])
+eval instant at 20m delta(http_requests_counter[20m])
+    expect warn
     {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}}

 # delta emits warn annotation for mix of histogram and floats.
-eval_warn instant at 20m delta(http_requests_mix[20m])
+eval instant at 20m delta(http_requests_mix[20m])
+    expect warn
     #empty

 clear

@@ -302,31 +317,41 @@ load 5m
     http_requests_histogram{path="/g"} 0 0 {{schema:-53 sum:1 count:1 custom_values:[1] buckets:[2] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}}

 eval instant at 20m idelta(http_requests[20m])
+    expect no_warn
     {path="/foo"} 50
     {path="/bar"} -50

 eval range from 0 to 20m step 5m idelta(http_requests_nan[15m1s])
+    expect no_warn
     {} _ NaN NaN NaN 6

 eval instant at 20m idelta(http_requests_histogram{path="/a"}[20m])
+    expect no_warn
     {path="/a"} {{sum:1 count:3 counter_reset_hint:gauge}}

 eval instant at 20m idelta(http_requests_histogram{path="/b"}[20m])
+    expect no_warn
     {path="/b"} {{sum:1 count:1 counter_reset_hint:gauge}}

 eval instant at 20m idelta(http_requests_histogram{path="/b"}[6m])
+    expect no_warn

-eval_warn instant at 20m idelta(http_requests_histogram{path="/c"}[20m])
+eval instant at 20m idelta(http_requests_histogram{path="/c"}[20m])
+    expect warn
     {path="/c"} {{sum:1 count:1 counter_reset_hint:gauge}}

-eval_warn instant at 20m idelta(http_requests_histogram{path="/d"}[20m])
+eval instant at 20m idelta(http_requests_histogram{path="/d"}[20m])
+    expect warn
     {path="/d"} {{sum:1 count:1 counter_reset_hint:gauge}}

-eval_warn instant at 20m idelta(http_requests_histogram{path="/e"}[20m])
+eval instant at 20m idelta(http_requests_histogram{path="/e"}[20m])
+    expect warn

-eval_warn instant at 20m idelta(http_requests_histogram{path="/f"}[20m])
+eval instant at 20m idelta(http_requests_histogram{path="/f"}[20m])
+    expect warn

-eval_warn instant at 20m idelta(http_requests_histogram{path="/g"}[20m])
+eval instant at 20m idelta(http_requests_histogram{path="/g"}[20m])
+    expect warn

 clear


@@ -341,28 +366,36 @@ load 5m

 # deriv should return the same as rate in simple cases.
 eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m])
+    expect no_info
     {group="canary", instance="1", job="app-server"} 0.26666666666666666

 eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job="app-server"}[50m])
+    expect no_info
     {group="canary", instance="1", job="app-server"} 0.26666666666666666

 # deriv should return correct result.
 eval instant at 50m deriv(testcounter_reset_middle_total[100m])
+    expect no_info
     {} 0.010606060606060607

 # deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
-eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m])
+eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m])
+    expect info
     {group="canary", instance="1", job="app-server"} 0.26666666666666666

-eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m])
+eval instant at 100m deriv(testcounter_reset_middle_mix[110m])
+    expect info
     {} 0.010606060606060607

 # deriv should silently ignore ranges consisting only of histograms.
 eval instant at 50m deriv(http_requests_histogram[60m])
+    expect no_info
+    expect no_warn
     #empty

 # deriv should return NaN in case of +Inf or -Inf found.
 eval instant at 100m deriv(http_requests_inf[100m])
+    expect no_info
     {job="app-server", instance="1", group="canary"} NaN

 # predict_linear should return correct result.

@@ -380,35 +413,45 @@ eval instant at 100m deriv(http_requests_inf[100m])
 # intercept at t=3000: 38.63636363636364
 # intercept at t=3000+3600: 76.81818181818181
 eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 3600)
+    expect no_info
     {} 70

 eval instant at 50m predict_linear(testcounter_reset_middle_total[50m], 1h)
+    expect no_info
     {} 70

 # intercept at t = 3000+3600 = 6600
 eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
+    expect no_info
     {} 76.81818181818181

 eval instant at 50m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 1h)
+    expect no_info
     {} 76.81818181818181

 # intercept at t = 600+3600 = 4200
 eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
+    expect no_info
     {} 51.36363636363637

 # intercept at t = 4200+3600 = 7800
 eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
+    expect no_info
     {} 89.54545454545455

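# Consistency check for the predictions above: with slope 0.010606060606060607
# and intercept 38.63636363636364 at t=3000, the value predicted for t=7800 is
# 38.63636363636364 + 0.010606060606060607 * 4800 = 89.54545454545455.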
 # predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
-eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000)
+eval instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000)
+    expect info
     {} 70

-eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m)
+eval instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m)
+    expect info
     {} 70

 # predict_linear should silently ignore ranges consisting only of histograms.
 eval instant at 60m predict_linear(http_requests_histogram[60m], 50m)
+    expect no_info
+    expect no_warn
     #empty

 # predict_linear should return NaN in case of +Inf or -Inf found.

@@ -471,13 +514,16 @@ eval instant at 0m label_replace(testmetric, "dst", "", "dst", ".*")
     testmetric{src="source-value-20"} 1

 # label_replace fails when the regex is invalid.
-eval_fail instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*")
+eval instant at 0m label_replace(testmetric, "dst", "value-$1", "src", "(.*")
+    expect fail

 # label_replace fails when the destination label name is not a valid Prometheus label name.
-eval_fail instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)")
+eval instant at 0m label_replace(testmetric, "\xff", "", "src", "(.*)")
+    expect fail

 # label_replace fails when there would be duplicated identical output label sets.
-eval_fail instant at 0m label_replace(testmetric, "src", "", "", "")
+eval instant at 0m label_replace(testmetric, "src", "", "", "")
+    expect fail

 clear

@@ -540,8 +586,8 @@ eval instant at 0m label_join(testmetric1, "dst", ", ", "src", "src1", "src2")
     testmetric1{src="foo",src1="bar",src2="foobar",dst="foo, bar, foobar"} 0
     testmetric1{src="fizz",src1="buzz",src2="fizzbuzz",dst="fizz, buzz, fizzbuzz"} 1

-eval_fail instant at 0m label_join(dup, "label", "", "this")
-expected_fail_message vector cannot contain metrics with the same labelset
+eval instant at 0m label_join(dup, "label", "", "this")
+    expect fail msg:vector cannot contain metrics with the same labelset

 clear


@@ -652,7 +698,8 @@ load 5m
     http_requests{job="app-server", instance="1", group="canary"} 0+80x10
     http_requests{job="app-server", instance="2", group="canary"} {{schema:0 sum:1 count:1}}x15

-eval_ordered instant at 50m sort(http_requests)
+eval instant at 50m sort(http_requests)
+    expect ordered
     http_requests{group="production", instance="0", job="api-server"} 100
     http_requests{group="production", instance="1", job="api-server"} 200
     http_requests{group="canary", instance="0", job="api-server"} 300

@@ -663,7 +710,8 @@ eval_ordered instant at 50m sort(http_requests)
     http_requests{group="canary", instance="1", job="app-server"} 800
     http_requests{group="canary", instance="2", job="api-server"} NaN

-eval_ordered instant at 50m sort_desc(http_requests)
+eval instant at 50m sort_desc(http_requests)
+    expect ordered
     http_requests{group="canary", instance="1", job="app-server"} 800
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="production", instance="1", job="app-server"} 600

@@ -701,7 +749,8 @@ load 5m
     node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 0+10x10
     node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10

-eval_ordered instant at 50m sort_by_label(http_requests, "instance")
+eval instant at 50m sort_by_label(http_requests, "instance")
+    expect ordered
     http_requests{group="canary", instance="0", job="api-server"} 300
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="production", instance="0", job="api-server"} 100

@@ -713,7 +762,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "instance")
     http_requests{group="canary", instance="2", job="api-server"} NaN
     http_requests{group="production", instance="2", job="api-server"} 100

-eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
+eval instant at 50m sort_by_label(http_requests, "instance", "group")
+    expect ordered
     http_requests{group="canary", instance="0", job="api-server"} 300
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="production", instance="0", job="api-server"} 100

@@ -725,7 +775,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
     http_requests{group="canary", instance="2", job="api-server"} NaN
     http_requests{group="production", instance="2", job="api-server"} 100

-eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
+eval instant at 50m sort_by_label(http_requests, "instance", "group")
+    expect ordered
     http_requests{group="canary", instance="0", job="api-server"} 300
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="production", instance="0", job="api-server"} 100

@@ -737,7 +788,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group")
     http_requests{group="canary", instance="2", job="api-server"} NaN
     http_requests{group="production", instance="2", job="api-server"} 100

-eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "job")
+eval instant at 50m sort_by_label(http_requests, "group", "instance", "job")
+    expect ordered
     http_requests{group="canary", instance="0", job="api-server"} 300
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="canary", instance="1", job="api-server"} 400

@@ -749,7 +801,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "group", "instance", "j
     http_requests{group="production", instance="1", job="app-server"} 600
     http_requests{group="production", instance="2", job="api-server"} 100

-eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "group")
+eval instant at 50m sort_by_label(http_requests, "job", "instance", "group")
+    expect ordered
     http_requests{group="canary", instance="0", job="api-server"} 300
     http_requests{group="production", instance="0", job="api-server"} 100
     http_requests{group="canary", instance="1", job="api-server"} 400

@@ -761,7 +814,8 @@ eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "gro
     http_requests{group="canary", instance="1", job="app-server"} 800
     http_requests{group="production", instance="1", job="app-server"} 600

-eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance")
+eval instant at 50m sort_by_label_desc(http_requests, "instance")
+    expect ordered
     http_requests{group="production", instance="2", job="api-server"} 100
     http_requests{group="canary", instance="2", job="api-server"} NaN
     http_requests{group="production", instance="1", job="app-server"} 600

@@ -773,7 +827,8 @@ eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance")
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="canary", instance="0", job="api-server"} 300

-eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group")
+eval instant at 50m sort_by_label_desc(http_requests, "instance", "group")
+    expect ordered
     http_requests{group="production", instance="2", job="api-server"} 100
     http_requests{group="canary", instance="2", job="api-server"} NaN
     http_requests{group="production", instance="1", job="app-server"} 600

@@ -785,7 +840,8 @@ eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="canary", instance="0", job="api-server"} 300

-eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job")
+eval instant at 50m sort_by_label_desc(http_requests, "instance", "group", "job")
+    expect ordered
     http_requests{group="production", instance="2", job="api-server"} 100
     http_requests{group="canary", instance="2", job="api-server"} NaN
     http_requests{group="production", instance="1", job="app-server"} 600

@@ -797,7 +853,8 @@ eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group
     http_requests{group="canary", instance="0", job="app-server"} 700
     http_requests{group="canary", instance="0", job="api-server"} 300

-eval_ordered instant at 50m sort_by_label(cpu_time_total, "cpu")
+eval instant at 50m sort_by_label(cpu_time_total, "cpu")
+    expect ordered
     cpu_time_total{job="cpu", cpu="0"} 100
     cpu_time_total{job="cpu", cpu="1"} 100
     cpu_time_total{job="cpu", cpu="2"} 100

@@ -809,12 +866,14 @@ eval_ordered instant at 50m sort_by_label(cpu_time_total, "cpu")
     cpu_time_total{job="cpu", cpu="21"} 100
     cpu_time_total{job="cpu", cpu="100"} 100

-eval_ordered instant at 50m sort_by_label(node_uname_info, "instance")
+eval instant at 50m sort_by_label(node_uname_info, "instance")
+    expect ordered
     node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100
     node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100
     node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100

-eval_ordered instant at 50m sort_by_label(node_uname_info, "release")
+eval instant at 50m sort_by_label(node_uname_info, "release")
+    expect ordered
     node_uname_info{job="node_exporter", instance="4m600", release="1.2.3"} 100
     node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100
     node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100
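# Note the natural ordering in the two node_uname_info results above: instance
# 4m5 < 4m600 < 4m1000 and release 1.2.3 < 1.11.3 < 1.111.3, rather than plain
# lexicographic order.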

@@ -835,13 +894,15 @@ load 10s
     http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000

 eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
+    expect no_info
     {job="api-server", instance="0", group="production"} 8000
     {job="api-server", instance="1", group="production"} 16000
     {job="api-server", instance="0", group="canary"} 24000
     {job="api-server", instance="1", group="canary"} 32000

 # double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
-eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1)
+eval instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1)
+    expect info
     {job="api-server", instance="0", group="production"} 30100
     {job="api-server", instance="1", group="production"} 30200
     {job="api-server", instance="0", group="canary"} 80300

@@ -849,6 +910,7 @@ eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m,

 # double_exponential_smoothing should silently ignore ranges consisting only of histograms.
 eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1)
+    expect no_info
     #empty

 # negative trends

@@ -994,10 +1056,12 @@ eval instant at 55s sum_over_time(metric11[1m])/count_over_time(metric11[1m])
     {} NaN

 # Tests for samples with mix of floats and histograms.
-eval_warn instant at 55s sum_over_time(metric12[1m])
+eval instant at 55s sum_over_time(metric12[1m])
+    expect warn
     # no result.

-eval_warn instant at 55s avg_over_time(metric12[1m])
+eval instant at 55s avg_over_time(metric12[1m])
+    expect warn
     # no result.

 # Tests for samples with only histograms.

@@ -1184,13 +1248,16 @@ eval instant at 1m stddev_over_time((metric[2m]))
 eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m])
     #empty

-eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m])
+eval instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m])
+    expect info
     {type="mix"} 0

 eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m])
+    expect no_info
     #empty

-eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m])
+eval instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m])
+    expect info
     {type="mix"} 0

 # Tests for stddev_over_time and stdvar_over_time #4927.

@@ -1212,12 +1279,15 @@ load 10s
     metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}}

 eval instant at 70s mad_over_time(metric[70s])
+    expect no_info
     {} 1

 eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s])
+    expect no_info
     #empty

-eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s])
+eval instant at 70s mad_over_time(metric_histogram{type="mix"}[70s])
+    expect info
     {type="mix"} 0

 # Tests for ts_of_max_over_time and ts_of_min_over_time. Using odd scrape interval to test for rounding bugs.

@@ -1261,49 +1331,69 @@ load 10s
     data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2

 eval instant at 1m quantile_over_time(0, data[2m])
+    expect no_info
+    expect no_warn
     {test="two samples"} 0
     {test="three samples"} 0
     {test="uneven samples"} 0

 eval instant at 1m quantile_over_time(0.5, data[2m])
+    expect no_info
+    expect no_warn
     {test="two samples"} 0.5
     {test="three samples"} 1
     {test="uneven samples"} 1

 eval instant at 1m quantile_over_time(0.75, data[2m])
+    expect no_info
+    expect no_warn
     {test="two samples"} 0.75
     {test="three samples"} 1.5
     {test="uneven samples"} 2.5

 eval instant at 1m quantile_over_time(0.8, data[2m])
+    expect no_info
+    expect no_warn
     {test="two samples"} 0.8
     {test="three samples"} 1.6
     {test="uneven samples"} 2.8

 eval instant at 1m quantile_over_time(1, data[2m])
+    expect no_info
+    expect no_warn
     {test="two samples"} 1
     {test="three samples"} 2
     {test="uneven samples"} 4

-eval_warn instant at 1m quantile_over_time(-1, data[2m])
+eval instant at 1m quantile_over_time(-1, data[2m])
+    expect no_info
+    expect warn
     {test="two samples"} -Inf
     {test="three samples"} -Inf
     {test="uneven samples"} -Inf

-eval_warn instant at 1m quantile_over_time(2, data[2m])
+eval instant at 1m quantile_over_time(2, data[2m])
+    expect no_info
+    expect warn
     {test="two samples"} +Inf
     {test="three samples"} +Inf
     {test="uneven samples"} +Inf

-eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
+eval instant at 1m (quantile_over_time(2, (data[2m])))
+    expect no_info
+    expect warn
     {test="two samples"} +Inf
     {test="three samples"} +Inf
     {test="uneven samples"} +Inf

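# Quantile parameters outside [0, 1] still evaluate (to -Inf/+Inf) but are
# flagged with a warning annotation, as asserted above.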
 eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m])
+    expect no_info
+    expect no_warn
     #empty

-eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m])
+eval instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m])
+    expect info
+    expect no_warn
     {test="mix samples"} 1

 clear

@@ -1417,7 +1507,8 @@ load 5m
     testmetric1{src="a",dst="b"} 0
     testmetric2{src="a",dst="b"} 1

-eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m])
+eval instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m])
+    expect fail

 clear

@@ -1432,6 +1523,7 @@ load 10s
     data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}}

 eval instant at 1m min_over_time(data[2m])
+    expect no_info
     {type="numbers"} 0
     {type="some_nan"} 0
     {type="some_nan2"} 1

@@ -1439,12 +1531,15 @@ eval instant at 1m min_over_time(data[2m])
     {type="only_nan"} NaN

 eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m])
+    expect no_info
     #empty

-eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m])
+eval instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m])
+    expect info
     {type="mix_samples"} 0

 eval instant at 1m max_over_time(data[2m])
+    expect no_info
     {type="numbers"} 3
     {type="some_nan"} 2
     {type="some_nan2"} 2

@@ -1452,12 +1547,15 @@ eval instant at 1m max_over_time(data[2m])
     {type="only_nan"} NaN

 eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m])
+    expect no_info
     #empty

-eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m])
+eval instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m])
+    expect info
     {type="mix_samples"} 1

 eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m])
+    expect no_info
     data{type="numbers"} 3
     data{type="some_nan"} NaN
     data{type="some_nan2"} 1

@@ -1467,6 +1565,7 @@ eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m])
     data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}}

 eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m])
+    expect no_info
     {type="numbers"} 3
     {type="some_nan"} 3
     {type="some_nan2"} 3

@@ -70,74 +70,91 @@ load_with_nhcb 5m

 # Test histogram_count.
 eval instant at 50m histogram_count(testhistogram3)
+    expect no_warn
     {start="positive"} 110
     {start="negative"} 20

 # Classic way of accessing the count still works.
 eval instant at 50m testhistogram3_count
+    expect no_warn
     testhistogram3_count{start="positive"} 110
     testhistogram3_count{start="negative"} 20

 # Test histogram_sum.
 eval instant at 50m histogram_sum(testhistogram3)
+    expect no_warn
     {start="positive"} 330
     {start="negative"} 80

 # Classic way of accessing the sum still works.
 eval instant at 50m testhistogram3_sum
+    expect no_warn
     testhistogram3_sum{start="positive"} 330
     testhistogram3_sum{start="negative"} 80

 # Test histogram_avg. This has no classic equivalent.
 eval instant at 50m histogram_avg(testhistogram3)
+    expect no_warn
     {start="positive"} 3
     {start="negative"} 4

 # Test histogram_stddev. This has no classic equivalent.
 eval instant at 50m histogram_stddev(testhistogram3)
+    expect no_warn
     {start="positive"} 2.7435461458749795
     {start="negative"} 4.187667907081458

 # Test histogram_stdvar. This has no classic equivalent.
 eval instant at 50m histogram_stdvar(testhistogram3)
+    expect no_warn
     {start="positive"} 7.527045454545455
     {start="negative"} 17.5365625

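# As expected, histogram_stdvar is the square of histogram_stddev:
# 2.7435461458749795^2 = 7.527045454545455 and 4.187667907081458^2 = 17.5365625.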
 # Test histogram_fraction.
 #
 eval instant at 50m histogram_fraction(0, 4, testhistogram2)
+    expect no_warn
     {} 0.6666666666666666

 eval instant at 50m histogram_fraction(0, 4, testhistogram2_bucket)
+    expect no_warn
     {} 0.6666666666666666

 eval instant at 50m histogram_fraction(0, 6, testhistogram2)
+    expect no_warn
     {} 1

 eval instant at 50m histogram_fraction(0, 6, testhistogram2_bucket)
+    expect no_warn
     {} 1

 eval instant at 50m histogram_fraction(0, 3.5, testhistogram2)
+    expect no_warn
     {} 0.5833333333333334

 eval instant at 50m histogram_fraction(0, 3.5, testhistogram2_bucket)
+    expect no_warn
     {} 0.5833333333333334

 eval instant at 50m histogram_fraction(0, 0.2, testhistogram3)
+    expect no_warn
     {start="positive"} 0.6363636363636364
     {start="negative"} 0

 eval instant at 50m histogram_fraction(0, 0.2, testhistogram3_bucket)
+    expect no_warn
     {start="positive"} 0.6363636363636364
     {start="negative"} 0

 eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
+    expect no_warn
     {start="positive"} 0.6363636363636364
     {start="negative"} 0

 eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m]))
+    expect no_warn
     {start="positive"} 0.6363636363636364
     {start="negative"} 0

|
@ -145,276 +162,345 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3_bucket[10m]))
|
||||||
# it exists) and divide by the count to get the same result.
|
# it exists) and divide by the count to get the same result.
|
||||||
|
|
||||||
eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
|
eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
|
||||||
|
expect no_warn
|
||||||
{start="positive"} 0.6363636363636364
|
{start="positive"} 0.6363636363636364
|
||||||
|
|
||||||
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
|
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
|
||||||
|
expect no_warn
|
||||||
{start="positive"} 0.6363636363636364
|
{start="positive"} 0.6363636363636364
|
||||||
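# Both the native histogram_fraction and the classic bucket-over-count ratio
# come out to 7/11 ≈ 0.6363636363636364, confirming the equivalence.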

 # Test histogram_quantile, native and classic.

 eval instant at 50m histogram_quantile(0, testhistogram3)
+expect no_warn
 {start="positive"} 0
 {start="negative"} -0.25

 eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
+expect no_warn
 {start="positive"} 0
 {start="negative"} -0.25

 eval instant at 50m histogram_quantile(0.25, testhistogram3)
+expect no_warn
 {start="positive"} 0.055
 {start="negative"} -0.225

 eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
+expect no_warn
 {start="positive"} 0.055
 {start="negative"} -0.225

 eval instant at 50m histogram_quantile(0.5, testhistogram3)
+expect no_warn
 {start="positive"} 0.125
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
+expect no_warn
 {start="positive"} 0.125
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0.75, testhistogram3)
+expect no_warn
 {start="positive"} 0.45
 {start="negative"} -0.15

 eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
+expect no_warn
 {start="positive"} 0.45
 {start="negative"} -0.15

 eval instant at 50m histogram_quantile(1, testhistogram3)
+expect no_warn
 {start="positive"} 1
 {start="negative"} -0.1

 eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
+expect no_warn
 {start="positive"} 1
 {start="negative"} -0.1

 # Quantile too low.

-eval_warn instant at 50m histogram_quantile(-0.1, testhistogram)
+eval instant at 50m histogram_quantile(-0.1, testhistogram)
+expect warn
 {start="positive"} -Inf
 {start="negative"} -Inf

-eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
+eval instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
+expect warn
 {start="positive"} -Inf
 {start="negative"} -Inf

 # Quantile too high.

-eval_warn instant at 50m histogram_quantile(1.01, testhistogram)
+eval instant at 50m histogram_quantile(1.01, testhistogram)
+expect warn
 {start="positive"} +Inf
 {start="negative"} +Inf

-eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
+eval instant at 50m histogram_quantile(1.01, testhistogram_bucket)
+expect warn
 {start="positive"} +Inf
 {start="negative"} +Inf

 # Quantile invalid.

-eval_warn instant at 50m histogram_quantile(NaN, testhistogram)
+eval instant at 50m histogram_quantile(NaN, testhistogram)
+expect warn
 {start="positive"} NaN
 {start="negative"} NaN

-eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
+eval instant at 50m histogram_quantile(NaN, testhistogram_bucket)
+expect warn
 {start="positive"} NaN
 {start="negative"} NaN

+eval instant at 50m histogram_quantile(NaN, non_existent)
+expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN
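# The msg variant pins the exact annotation text; note the warning fires even
# though the non_existent selector matches no series.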

 # Quantile value in lowest bucket.

 eval instant at 50m histogram_quantile(0, testhistogram)
+expect no_warn
 {start="positive"} 0
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0, testhistogram_bucket)
+expect no_warn
 {start="positive"} 0
 {start="negative"} -0.2

 # Quantile value in highest bucket.

 eval instant at 50m histogram_quantile(1, testhistogram)
+expect no_warn
 {start="positive"} 1
 {start="negative"} 0.3

 eval instant at 50m histogram_quantile(1, testhistogram_bucket)
+expect no_warn
 {start="positive"} 1
 {start="negative"} 0.3

 # Finally some useful quantiles.

 eval instant at 50m histogram_quantile(0.2, testhistogram)
+expect no_warn
 {start="positive"} 0.048
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
+expect no_warn
 {start="positive"} 0.048
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0.5, testhistogram)
+expect no_warn
 {start="positive"} 0.15
 {start="negative"} -0.15

 eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
+expect no_warn
 {start="positive"} 0.15
 {start="negative"} -0.15

 eval instant at 50m histogram_quantile(0.8, testhistogram)
+expect no_warn
 {start="positive"} 0.72
 {start="negative"} 0.3

 eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
+expect no_warn
 {start="positive"} 0.72
 {start="negative"} 0.3

 # More realistic with rates.
 eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m]))
+expect no_warn
 {start="positive"} 0.048
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
+expect no_warn
 {start="positive"} 0.048
 {start="negative"} -0.2

 eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m]))
+expect no_warn
 {start="positive"} 0.15
 {start="negative"} -0.15

 eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
+expect no_warn
 {start="positive"} 0.15
 {start="negative"} -0.15

 eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m]))
+expect no_warn
 {start="positive"} 0.72
 {start="negative"} 0.3

 eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
+expect no_warn
 {start="positive"} 0.72
 {start="negative"} 0.3

 # Want results exactly in the middle of the bucket.

 eval instant at 7m histogram_quantile(1./6., testhistogram2)
+expect no_warn
 {} 1

 eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket)
+expect no_warn
 {} 1

 eval instant at 7m histogram_quantile(0.5, testhistogram2)
+expect no_warn
 {} 3

 eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket)
+expect no_warn
 {} 3

 eval instant at 7m histogram_quantile(5./6., testhistogram2)
+expect no_warn
 {} 5

 eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket)
+expect no_warn
 {} 5

 eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m]))
+expect no_warn
 {} 1

 eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m]))
+expect no_warn
 {} 1

 eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m]))
+expect no_warn
 {} 3

 eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m]))
+expect no_warn
 {} 3

 eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m]))
+expect no_warn
 {} 5

 eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
+expect no_warn
 {} 5
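# The midpoints 1, 3, 5 for quantiles 1/6, 3/6, 5/6 are what linear
# interpolation yields if, as these results suggest, testhistogram2 fills
# three equal-width buckets over [0,6] evenly.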

 # Aggregated histogram: Everything in one. Note how native histograms
 # don't require aggregation by le.

 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])))
+expect no_warn
 {} 0.075

 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
+expect no_warn
 {} 0.075

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])))
+expect no_warn
 {} 0.1277777777777778

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
+expect no_warn
 {} 0.1277777777777778

 # Aggregated histogram: Everything in one. Now with avg, which does not change anything.

 eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m])))
+expect no_warn
 {} 0.075

 eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
+expect no_warn
 {} 0.075

 eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m])))
+expect no_warn
 {} 0.12777777777777778

 eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
+expect no_warn
 {} 0.12777777777777778
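# avg only scales every bucket of the sum by the number of series, and
# histogram_quantile depends only on relative bucket proportions, so the
# quantiles are unchanged.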

 # Aggregated histogram: By instance.

 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance))
+expect no_warn
 {instance="ins1"} 0.075
 {instance="ins2"} 0.075

 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
+expect no_warn
 {instance="ins1"} 0.075
 {instance="ins2"} 0.075

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance))
+expect no_warn
 {instance="ins1"} 0.1333333333
 {instance="ins2"} 0.125

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
+expect no_warn
 {instance="ins1"} 0.1333333333
 {instance="ins2"} 0.125

 # Aggregated histogram: By job.
 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job))
+expect no_warn
 {job="job1"} 0.1
 {job="job2"} 0.0642857142857143

 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
+expect no_warn
 {job="job1"} 0.1
 {job="job2"} 0.0642857142857143

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job))
+expect no_warn
 {job="job1"} 0.14
 {job="job2"} 0.1125

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
+expect no_warn
 {job="job1"} 0.14
 {job="job2"} 0.1125

 # Aggregated histogram: By job and instance.
 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance))
+expect no_warn
 {instance="ins1", job="job1"} 0.11
 {instance="ins2", job="job1"} 0.09
 {instance="ins1", job="job2"} 0.06
 {instance="ins2", job="job2"} 0.0675

 eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
+expect no_warn
 {instance="ins1", job="job1"} 0.11
 {instance="ins2", job="job1"} 0.09
 {instance="ins1", job="job2"} 0.06
 {instance="ins2", job="job2"} 0.0675

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance))
+expect no_warn
 {instance="ins1", job="job1"} 0.15
 {instance="ins2", job="job1"} 0.1333333333333333
 {instance="ins1", job="job2"} 0.1
 {instance="ins2", job="job2"} 0.1166666666666667

 eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
+expect no_warn
 {instance="ins1", job="job1"} 0.15
 {instance="ins2", job="job1"} 0.1333333333333333
 {instance="ins1", job="job2"} 0.1

@@ -422,24 +508,28 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu

 # The unaggregated histogram for comparison. Same result as the previous one.
 eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m]))
+expect no_warn
 {instance="ins1", job="job1"} 0.11
 {instance="ins2", job="job1"} 0.09
 {instance="ins1", job="job2"} 0.06
 {instance="ins2", job="job2"} 0.0675

 eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
+expect no_warn
 {instance="ins1", job="job1"} 0.11
 {instance="ins2", job="job1"} 0.09
 {instance="ins1", job="job2"} 0.06
 {instance="ins2", job="job2"} 0.0675

 eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m]))
+expect no_warn
 {instance="ins1", job="job1"} 0.15
 {instance="ins2", job="job1"} 0.13333333333333333
 {instance="ins1", job="job2"} 0.1
 {instance="ins2", job="job2"} 0.11666666666666667

 eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
+expect no_warn
 {instance="ins1", job="job1"} 0.15
 {instance="ins2", job="job1"} 0.13333333333333333
 {instance="ins1", job="job2"} 0.1

@@ -447,25 +537,32 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket

 # All NHCBs summed into one.
 eval instant at 50m sum(request_duration_seconds)
+expect no_warn
 {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}

 eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"})
+expect no_warn
 {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}

 eval instant at 50m avg(request_duration_seconds)
+expect no_warn
 {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}}

 # To verify the result above, calculate from classic histogram as well.
 eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"})
+expect no_warn
 {} 25

 eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"})
+expect no_warn
 {} 22.5

 eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"})
+expect no_warn
 {} 15

 eval instant at 50m count(request_duration_seconds)
+expect no_warn
 {} 4
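# The avg checks out against the sum over the 4 series: count 250/4 = 62.5,
# and per-bucket 100/4 = 25, 90/4 = 22.5, 60/4 = 15.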

 # A histogram with nonmonotonic bucket counts. This may happen when recording
@@ -481,13 +578,16 @@ load 5m
 nonmonotonic_bucket{le="+Inf"} 0+8x10

 # Nonmonotonic buckets, triggering an info annotation.
-eval_info instant at 50m histogram_quantile(0.01, nonmonotonic_bucket)
+eval instant at 50m histogram_quantile(0.01, nonmonotonic_bucket)
+expect info
 {} 0.0045

-eval_info instant at 50m histogram_quantile(0.5, nonmonotonic_bucket)
+eval instant at 50m histogram_quantile(0.5, nonmonotonic_bucket)
+expect info
 {} 8.5

-eval_info instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
+eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket)
+expect info
 {} 979.75
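# histogram_quantile still returns a value here because it enforces
# monotonicity internally (raising any decreasing cumulative count to the
# running maximum); the info annotation reports that such a correction was
# applied.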

 # Buckets with different representations of the same upper bound.
@@ -522,9 +622,11 @@ load_with_nhcb 5m
 request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10

-eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
+eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
+expect fail

-eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
+eval instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})
+expect fail

 # Histogram with constant buckets.
 load_with_nhcb 1m

@@ -15,10 +15,14 @@ load 5m
 bar 0 1 0 -1 0 1 0 -1 0 1 0

 eval instant at 50m count(limitk by (group) (0, http_requests))
+expect no_info
+expect no_warn
 # empty

 eval instant at 50m count(limitk by (group) (-1, http_requests))
+expect no_info
+expect no_warn
 # empty

 # Exercise k==1 special case (as sample is added before the main series loop).
 eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests)

@@ -73,7 +73,8 @@ eval instant at 10m sum by (__name__, env) (metric_total{env="1"})

 # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label).
 # This is an accidental side effect of delayed __name__ label dropping
-eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m]))
+eval instant at 10m sum by (__name__) (rate({env="1"}[10m]))
+expect fail

 # Aggregation operators aggregate metrics with same labelset and to-be-dropped names.
 # This is an accidental side effect of delayed __name__ label dropping

@@ -398,35 +398,44 @@ clear
 load 10m
 histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1

-eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1)
+eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
+expect warn
 {} Inf

 eval instant at 10m histogram_quantile(1, histogram_quantile_1)
+expect no_warn
 {} 16
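# With schema 0, bucket boundaries are consecutive powers of two; the highest
# populated bucket ends at 16, so histogram_quantile(1) returns exactly 16.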

 # The following quantiles are within a bucket. Exponential
 # interpolation is applied (rather than linear, as it is done for
 # classic histograms), leading to slightly different quantile values.
 eval instant at 10m histogram_quantile(0.99, histogram_quantile_1)
+expect no_warn
 {} 15.67072476139083

 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1)
+expect no_warn
 {} 12.99603834169977

 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1)
+expect no_warn
 {} 4.594793419988138

 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1)
+expect no_warn
 {} 1.5874010519681994
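# Worked example for the 0.5 case: rank 0.5*12 = 6 falls into the (1,2] bucket
# with fraction 2/3, and exponential interpolation yields 1*(2/1)^(2/3) =
# 2^(2/3) ≈ 1.5874010519681994, where linear interpolation would give 1.666...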

 # Linear interpolation within the zero bucket after all.
 eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
+expect no_warn
 {} 0.0006
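# Check: rank 0.1*12 = 1.2 lies among the 2 zero-bucket observations, and
# linear interpolation over the zero bucket [0, 0.001] gives (1.2/2)*0.001 = 0.0006.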

 eval instant at 10m histogram_quantile(0, histogram_quantile_1)
+expect no_warn
 {} 0

-eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1)
+eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
+expect warn
 {} -Inf

 clear

@@ -435,31 +444,39 @@ clear
 load 10m
 histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1

-eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2)
+eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
+expect warn
 {} Inf

 eval instant at 10m histogram_quantile(1, histogram_quantile_2)
+expect no_warn
 {} 0

 # Again, the quantile values here are slightly different from what
 # they would be with linear interpolation. Note that quantiles
 # ending up in the zero bucket are linearly interpolated after all.
 eval instant at 10m histogram_quantile(0.99, histogram_quantile_2)
+expect no_warn
 {} -0.00006

 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2)
+expect no_warn
 {} -0.0006

 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2)
+expect no_warn
 {} -1.5874010519681996

 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
+expect no_warn
 {} -12.996038341699768
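# histogram_quantile_2 is histogram_quantile_1 mirrored into the negative
# range, hence histogram_quantile(q) here equals -histogram_quantile(1-q)
# above, e.g. -12.996038341699768 at 0.1 versus 12.99603834169977 at 0.9.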

 eval instant at 10m histogram_quantile(0, histogram_quantile_2)
+expect no_warn
 {} -16

-eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
+eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
+expect warn
 {} -Inf

 clear

@@ -470,46 +487,59 @@ clear
 load 10m
 histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1

-eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_3)
+eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
+expect warn
 {} Inf

 eval instant at 10m histogram_quantile(1, histogram_quantile_3)
+expect no_warn
 {} 16

 eval instant at 10m histogram_quantile(0.99, histogram_quantile_3)
+expect no_warn
 {} 15.34822590920423

 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3)
+expect no_warn
 {} 10.556063286183155

 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3)
+expect no_warn
 {} 1.2030250360821164

 # Linear interpolation in the zero bucket, symmetrically centered around
 # the zero point.
 eval instant at 10m histogram_quantile(0.55, histogram_quantile_3)
+expect no_warn
 {} 0.0006

 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3)
+expect no_warn
 {} 0

 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3)
+expect no_warn
 {} -0.0006
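# Check for 0.55: rank 0.55*24 = 13.2; the 10 negative-bucket observations lie
# below the zero bucket, so the position within its 4 observations is 3.2/4 = 0.8,
# and -0.001 + 0.8*0.002 = 0.0006. The 0.45 case mirrors this to -0.0006.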

 # Finally negative buckets with mirrored exponential interpolation.
 eval instant at 10m histogram_quantile(0.3, histogram_quantile_3)
+expect no_warn
 {} -1.2030250360821169

 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3)
+expect no_warn
 {} -10.556063286183155

 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
+expect no_warn
 {} -15.34822590920423

 eval instant at 10m histogram_quantile(0, histogram_quantile_3)
+expect no_warn
 {} -16

-eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
+eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
+expect warn
 {} -Inf

 clear

@@ -909,63 +939,84 @@ load 10m
 float_series_0 0+0x1

 eval instant at 10m histogram_mul_div*3
+expect no_info
 {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}
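# Scalar multiplication scales every component of the histogram by the same
# factor: count 30->90, sum 33->99, buckets [3 3 3]->[9 9 9], n_buckets
# [6 6 6]->[18 18 18].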

 eval instant at 10m histogram_mul_div*-1
+expect no_info
 {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}

 eval instant at 10m -histogram_mul_div
+expect no_info
 {} {{schema:0 count:-30 sum:-33 z_bucket:-3 z_bucket_w:0.001 buckets:[-3 -3 -3] n_buckets:[-6 -6 -6]}}

 eval instant at 10m histogram_mul_div*-3
+expect no_info
 {} {{schema:0 count:-90 sum:-99 z_bucket:-9 z_bucket_w:0.001 buckets:[-9 -9 -9] n_buckets:[-18 -18 -18]}}

 eval instant at 10m 3*histogram_mul_div
+expect no_info
 {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

 eval instant at 10m histogram_mul_div*float_series_3
+expect no_info
 {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

 eval instant at 10m float_series_3*histogram_mul_div
+expect no_info
 {} {{schema:0 count:90 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[18 18 18]}}

 eval instant at 10m histogram_mul_div/3
+expect no_info
 {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}

 eval instant at 10m histogram_mul_div/-3
+expect no_info
 {} {{schema:0 count:-10 sum:-11 z_bucket:-1 z_bucket_w:0.001 buckets:[-1 -1 -1] n_buckets:[-2 -2 -2]}}

 eval instant at 10m histogram_mul_div/float_series_3
+expect no_info
 {} {{schema:0 count:10 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[2 2 2]}}

 eval instant at 10m histogram_mul_div*0
+expect no_info
 {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

 eval instant at 10m 0*histogram_mul_div
+expect no_info
 {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

 eval instant at 10m histogram_mul_div*float_series_0
+expect no_info
 {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

 eval instant at 10m float_series_0*histogram_mul_div
+expect no_info
 {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}}

 eval instant at 10m histogram_mul_div/0
+expect no_info
 {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}}

 eval instant at 10m histogram_mul_div/float_series_0
+expect no_info
 {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}}

 eval instant at 10m histogram_mul_div*0/0
+expect no_info
 {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}}

-eval_info instant at 10m histogram_mul_div*histogram_mul_div
+eval instant at 10m histogram_mul_div*histogram_mul_div
+expect info

-eval_info instant at 10m histogram_mul_div/histogram_mul_div
+eval instant at 10m histogram_mul_div/histogram_mul_div
+expect info

-eval_info instant at 10m float_series_3/histogram_mul_div
+eval instant at 10m float_series_3/histogram_mul_div
+expect info

-eval_info instant at 10m 0/histogram_mul_div
+eval instant at 10m 0/histogram_mul_div
+expect info
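# Multiplying two histograms, or dividing by a histogram, is undefined; each
# of these returns no samples and emits an info annotation instead.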

 clear

@@ -976,13 +1027,17 @@ load 10m
 histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
 float_sample 0x1

-eval_info instant at 10m float_sample+histogram_sample
+eval instant at 10m float_sample+histogram_sample
+expect info

-eval_info instant at 10m histogram_sample+float_sample
+eval instant at 10m histogram_sample+float_sample
+expect info

-eval_info instant at 10m float_sample-histogram_sample
+eval instant at 10m float_sample-histogram_sample
+expect info

-eval_info instant at 10m histogram_sample-float_sample
+eval instant at 10m histogram_sample-float_sample
+expect info
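# Unlike * and /, where a float acts as a scalar factor as shown above, + and -
# between a float and a histogram are undefined, so these also return nothing
# and carry an info annotation.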

 # Counter reset only noticeable in a single bucket.
 load 5m

@@ -1020,11 +1075,13 @@ load 30s
 some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}}

 # Test the case where we only have two points for rate
-eval_warn instant at 30s rate(some_metric[1m])
+eval instant at 30s rate(some_metric[1m])
+expect warn
 {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}
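# The samples carry counter_reset_hint:gauge, hence the warning from rate();
# the value itself is just the slope: count, sum, and the single bucket each
# grow by 1 per 30s sample, i.e. 1/30 ≈ 0.03333333333333333 per second.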

 # Test the case where we have more than two points for rate
-eval_warn instant at 1m rate(some_metric[1m30s])
+eval instant at 1m rate(some_metric[1m30s])
+expect warn
 {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}}

 clear

@@ -1034,18 +1091,24 @@ load 30s
 some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}

 # Start and end with exponential, with custom in the middle.
-eval_warn instant at 1m rate(some_metric[1m30s])
+eval instant at 1m rate(some_metric[1m30s])
+expect warn
 # Should produce no results.

 # Start and end with custom, with exponential in the middle.
-eval_warn instant at 1m30s rate(some_metric[1m30s])
+eval instant at 1m30s rate(some_metric[1m30s])
+expect warn
 # Should produce no results.

-# Start with custom, end with exponential. Return the exponential histogram divided by 30.
+# Start with custom, end with exponential. Return the exponential histogram divided by 48.
+# (The 1st sample is the NHCB with count:1. It is mostly ignored with the exception of the
+# count, which means the rate calculation extrapolates until the count hits 0.)
 eval instant at 1m rate(some_metric[1m])
-{} {{schema:0 sum:0.16666666666666666 count:0.13333333333333333 buckets:[0.03333333333333333 0.06666666666666667 0.03333333333333333]}}
+{} {{count:0.08333333333333333 sum:0.10416666666666666 counter_reset_hint:gauge buckets:[0.020833333333333332 0.041666666666666664 0.020833333333333332]}}
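# "Divided by 48" is visible in every component of the exponential sample
# (count 4, sum 5, buckets [1 2 1]): 4/48 = 0.08333..., 5/48 = 0.10416...,
# and [1 2 1]/48 = [0.020833... 0.041666... 0.020833...].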

 # Start with exponential, end with custom. Return the custom buckets histogram divided by 30.
+# (With the 2nd sample having a count of 1, the extrapolation to zero lands exactly at the
+# left boundary of the range, so no extrapolation limitation needed.)
 eval instant at 30s rate(some_metric[1m])
 {} {{schema:-53 sum:0.03333333333333333 count:0.03333333333333333 custom_values:[5 10] buckets:[0.03333333333333333]}}
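# Correspondingly, the NHCB components (count 1, sum 1, bucket [1]) all show
# up as 1/30 ≈ 0.03333333333333333.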

@@ -1107,10 +1170,12 @@ load 6m
 # T=0: only exponential
 # T=6: only custom
 # T=12: mixed, should be ignored and emit a warning
-eval_warn range from 0 to 12m step 6m sum(metric)
+eval range from 0 to 12m step 6m sum(metric)
+expect warn
 {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _

-eval_warn range from 0 to 12m step 6m avg(metric)
+eval range from 0 to 12m step 6m avg(metric)
+expect warn
 {} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _
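# avg is the sum divided by the 2 series present: sum 7->3.5, count 5->2.5,
# buckets [2 3 2]->[1 1.5 1], and likewise for the custom-buckets step.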

 clear

@@ -1124,10 +1189,12 @@ load 6m
 # T=0: incompatible, should be ignored and emit a warning
 # T=6: compatible
 # T=12: incompatible followed by compatible, should be ignored and emit a warning
-eval_warn range from 0 to 12m step 6m sum(metric)
+eval range from 0 to 12m step 6m sum(metric)
+expect warn
 {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _

-eval_warn range from 0 to 12m step 6m avg(metric)
+eval range from 0 to 12m step 6m avg(metric)
+expect warn
 {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _

 # Test incompatible schemas with additional aggregation operators

@@ -1159,9 +1226,11 @@ eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{s
 metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _

 # Test incompatible schemas with arithmetic binary operators
-eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"}
+eval range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"}
+expect warn

-eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"}
+eval range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"}
+expect warn

 clear

@@ -1171,12 +1240,15 @@ load 6m
 metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}

 eval range from 0 to 6m step 6m metric1 == metric2
+expect no_info
 metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}}

 eval range from 0 to 6m step 6m metric1 != metric2
+expect no_info
 metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _

-eval_info range from 0 to 6m step 6m metric2 > metric2
+eval range from 0 to 6m step 6m metric2 > metric2
+expect info
|
|
||||||
clear
|
clear
|
||||||
|
|
||||||
@@ -1186,62 +1258,82 @@ load 6m
 # If evaluating at 12m, the first two NHCBs have the same custom values
 # while the 3rd one has different ones.

-eval_warn instant at 12m sum_over_time(nhcb_metric[13m])
+eval instant at 12m sum_over_time(nhcb_metric[13m])
+expect warn

-eval_warn instant at 12m avg_over_time(nhcb_metric[13m])
+eval instant at 12m avg_over_time(nhcb_metric[13m])
+expect warn

 eval instant at 12m last_over_time(nhcb_metric[13m])
+expect no_warn
 nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}

 eval instant at 12m count_over_time(nhcb_metric[13m])
+expect no_warn
 {} 3

 eval instant at 12m present_over_time(nhcb_metric[13m])
+expect no_warn
 {} 1

 eval instant at 12m changes(nhcb_metric[13m])
+expect no_warn
 {} 1

-eval_warn instant at 12m delta(nhcb_metric[13m])
+eval instant at 12m delta(nhcb_metric[13m])
+expect warn

-eval_warn instant at 12m increase(nhcb_metric[13m])
+eval instant at 12m increase(nhcb_metric[13m])
+expect warn

-eval_warn instant at 12m rate(nhcb_metric[13m])
+eval instant at 12m rate(nhcb_metric[13m])
+expect warn

 eval instant at 12m resets(nhcb_metric[13m])
+expect no_warn
 {} 1

 # Now doing the same again, but at 18m, where the first NHCB has
 # different custom_values compared to the other two. This now
 # works with no warning for increase() and rate(). No change
 # otherwise.

-eval_warn instant at 18m sum_over_time(nhcb_metric[13m])
+eval instant at 18m sum_over_time(nhcb_metric[13m])
+expect warn

-eval_warn instant at 18m avg_over_time(nhcb_metric[13m])
+eval instant at 18m avg_over_time(nhcb_metric[13m])
+expect warn

 eval instant at 18m last_over_time(nhcb_metric[13m])
+expect no_warn
 nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}

 eval instant at 18m count_over_time(nhcb_metric[13m])
+expect no_warn
 {} 3

 eval instant at 18m present_over_time(nhcb_metric[13m])
+expect no_warn
 {} 1

 eval instant at 18m changes(nhcb_metric[13m])
+expect no_warn
 {} 1

-eval_warn instant at 18m delta(nhcb_metric[13m])
+eval instant at 18m delta(nhcb_metric[13m])
+expect warn

 eval instant at 18m increase(nhcb_metric[13m])
+expect no_warn
 {} {{schema:-53 count:1.0833333333333333 sum:1.0833333333333333 custom_values:[5 10] buckets:[1.0833333333333333]}}

 eval instant at 18m rate(nhcb_metric[13m])
+expect no_warn
 {} {{schema:-53 count:0.0013888888888888887 sum:0.0013888888888888887 custom_values:[5 10] buckets:[0.0013888888888888887]}}

 eval instant at 18m resets(nhcb_metric[13m])
+expect no_warn
 {} 1

 clear

@@ -1259,7 +1351,8 @@ load 1m
 metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
 metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}

-eval_warn instant at 0 sum by (group) (metric)
+eval instant at 0 sum by (group) (metric)
+expect warn
 {group="just-floats"} 5
 {group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}}
 {group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}}
@@ -1275,17 +1368,22 @@ load 10m
 histogram_sum_float{idx="0"} 42.0x1

 eval instant at 10m sum(histogram_sum)
+expect no_warn
 {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}

-eval_warn instant at 10m sum({idx="0"})
+eval instant at 10m sum({idx="0"})
+expect warn

 eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"})
+expect no_warn
 {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}}

 eval instant at 10m count(histogram_sum)
+expect no_warn
 {} 4

 eval instant at 10m avg(histogram_sum)
+expect no_warn
 {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}}

 clear
@@ -1373,3 +1471,55 @@ eval instant at 1m histogram_fraction(-Inf, +Inf, histogram_nan)
 expect info msg: PromQL info: input to histogram_fraction has NaN observations, which are excluded from all fractions for metric name "histogram_nan"
 {case="100% NaNs"} 0.0
 {case="20% NaNs"} 0.8
+
+clear
+
+# Tests to demonstrate how an extrapolation below zero is prevented for both float counters and native counter histograms.
+# Note that the float counter behaves the same as the histogram count after `increase`.
+
+load 1m
+metric{type="histogram"} {{schema:0 count:15 sum:25 buckets:[5 10]}} {{schema:0 count:2490 sum:75 buckets:[15 2475]}}x55
+metric{type="counter"} 15 2490x55
+
+# End of range coincides with sample. Zero point of count is reached within the range.
+# Note that the 2nd bucket has an exaggerated increase of 2479.939393939394 (although
+# it has a value of only 2475 at the end of the range).
+eval instant at 55m increase(metric[90m])
+{type="histogram"} {{count:2490 sum:50.303030303030305 counter_reset_hint:gauge buckets:[10.06060606060606 2479.939393939394]}}
+{type="counter"} 2490
+
+# End of range does not coincide with sample. Zero point of count is reached within the range.
+# The 2nd bucket again has an exaggerated increase, but it is less obvious because of the
+# right-side extrapolation.
+eval instant at 54m30s increase(metric[90m])
+{type="histogram"} {{count:2512.9166666666665 sum:50.76599326599326 counter_reset_hint:gauge buckets:[10.153198653198652 2502.7634680134674]}}
+{type="counter"} 2512.9166666666665
+
+# End of range coincides with sample. Zero point of count is reached outside of (i.e. before) the range.
+# This means no change of extrapolation is required for the histogram count (and neither for the float counter),
+# however, the 2nd bucket's extrapolation will reach zero within the range. The overestimation is visible
+# easily here because the last sample in the range coincides with the boundary, where the 2nd bucket has
+# a value of 2475 but has increased by 2476.2045454545455 according to the returned result.
+eval instant at 55m increase(metric[55m15s])
+{type="histogram"} {{count:2486.25 sum:50.227272727272734 counter_reset_hint:gauge buckets:[10.045454545454547 2476.2045454545455]}}
+{type="counter"} 2486.25
+
+# End of range does not coincide with sample. Zero point of count is reached outside of (i.e. before) the range.
+# This means no change of extrapolation is required for the histogram count (and neither for the float counter),
+# however, the 2nd bucket's extrapolation will reach zero within the range.
+eval instant at 54m30s increase(metric[54m45s])
+{type="histogram"} {{count:2509.375 sum:50.69444444444444 counter_reset_hint:gauge buckets:[10.13888888888889 2499.236111111111]}}
+{type="counter"} 2509.375
+
+# Try the same, but now extract just the histogram count via `histogram_count`.
+eval instant at 55m histogram_count(increase(metric[90m]))
+{type="histogram"} 2490
+
+eval instant at 54m30s histogram_count(increase(metric[90m]))
+{type="histogram"} 2512.9166666666665
+
+eval instant at 55m histogram_count(increase(metric[55m15s]))
+{type="histogram"} 2486.25
+
+eval instant at 54m30s histogram_count(increase(metric[54m45s]))
+{type="histogram"} 2509.375
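The comments in this added block pin down the extrapolation rule for counters: `increase()` extrapolates the sampled delta out to the range boundaries, but the left-side extrapolation stops where the counter (or an individual bucket) would cross zero, which is also why a bucket can report a slightly exaggerated increase while the overall count does not. A simplified Go model of that rule, written from the behaviour the tests describe rather than taken from this patch (all names are made up):

package main

import "fmt"

// extrapolatedIncrease models increase() over a range [start, end]:
// firstV/lastV and firstT/lastT are the values and timestamps (seconds)
// of the first and last samples inside the range, n is the sample count.
// It assumes no counter resets within the range.
func extrapolatedIncrease(firstV, lastV, firstT, lastT float64, n int, start, end float64) float64 {
    delta := lastV - firstV
    sampledInterval := lastT - firstT
    avgInterval := sampledInterval / float64(n-1)

    // Extrapolate to the range boundaries; if a boundary is far from the
    // nearest sample, only extrapolate by half an average interval.
    toStart := firstT - start
    if toStart >= avgInterval*1.1 {
        toStart = avgInterval / 2
    }
    toEnd := end - lastT
    if toEnd >= avgInterval*1.1 {
        toEnd = avgInterval / 2
    }

    // The guard these tests exercise: never extrapolate a counter below
    // zero, i.e. never further left than the implied zero point.
    if delta > 0 && firstV >= 0 {
        if toZero := sampledInterval * (firstV / delta); toZero < toStart {
            toStart = toZero
        }
    }

    return delta * (sampledInterval + toStart + toEnd) / sampledInterval
}

func main() {
    // Mirrors `eval instant at 55m increase(metric[90m])` for type="counter":
    // 56 samples, 15 at t=0m and 2490 from t=1m on, range [-35m, 55m].
    fmt.Println(extrapolatedIncrease(15, 2490, 0, 55*60, 56, -35*60, 55*60)) // ≈ 2490
}

Worked through by hand: the delta is 2475 over a 55m sampled interval, the implied zero point lies 20s before the first sample (55m * 15/2475), so the left extrapolation is clamped from 30s down to 20s and the result is 2475 * 3320/3300 = 2490, matching the expected output above.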
@@ -289,24 +289,32 @@ eval instant at 50m http_requests_total{job="api-server", instance="0", group="p
 {job="api-server", instance="0", group="production"} 1

 # The histogram is ignored here so the result doesn't change but it has an info annotation now.
-eval_info instant at 5m {job="app-server"} == 80
+eval instant at 5m {job="app-server"} == 80
+expect info
 http_requests_total{group="canary", instance="1", job="app-server"} 80

-eval_info instant at 5m http_requests_histogram != 80
+eval instant at 5m http_requests_histogram != 80
+expect info

-eval_info instant at 5m http_requests_histogram > 80
+eval instant at 5m http_requests_histogram > 80
+expect info

-eval_info instant at 5m http_requests_histogram < 80
+eval instant at 5m http_requests_histogram < 80
+expect info

-eval_info instant at 5m http_requests_histogram >= 80
+eval instant at 5m http_requests_histogram >= 80
+expect info

-eval_info instant at 5m http_requests_histogram <= 80
+eval instant at 5m http_requests_histogram <= 80
+expect info

 # Should produce valid results in case of (in)equality between two histograms.
 eval instant at 5m http_requests_histogram == http_requests_histogram
+expect no_info
 http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}

 eval instant at 5m http_requests_histogram != http_requests_histogram
+expect no_info

 # group_left/group_right.

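The `expect info` lines above pin down that comparisons mixing a float with a histogram sample drop the histogram from the result and record an info-level annotation instead of failing the query. A hedged sketch of how those annotations surface to a caller of the engine (assumes the current promql API shape, in which `Result.Warnings` carries the annotations; setup of the engine and queryable is elided):

package example

import (
    "context"
    "fmt"
    "time"

    "github.com/prometheus/prometheus/promql"
    "github.com/prometheus/prometheus/storage"
)

// printComparisonAnnotations shows where the info annotations asserted
// above end up for an engine caller. Sketch only, not code from this patch.
func printComparisonAnnotations(ctx context.Context, engine *promql.Engine, q storage.Queryable, ts time.Time) error {
    qry, err := engine.NewInstantQuery(ctx, q, nil, `http_requests_histogram > 80`, ts)
    if err != nil {
        return err
    }
    defer qry.Close()

    res := qry.Exec(ctx)
    if res.Err != nil {
        return res.Err
    }
    // Info-level annotations (like the one these tests expect) are carried
    // alongside the result, not in res.Err: the query still succeeds, with
    // the histogram series filtered out of the comparison.
    for _, annotation := range res.Warnings {
        fmt.Println("annotation:", annotation)
    }
    return nil
}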
@@ -470,7 +478,8 @@ load 5m
 testmetric1{src="a",dst="b"} 0
 testmetric2{src="a",dst="b"} 1

-eval_fail instant at 0m -{__name__=~'testmetric1|testmetric2'}
+eval instant at 0m -{__name__=~'testmetric1|testmetric2'}
+expect fail

 clear

@@ -520,290 +529,386 @@ load 6m
 right_floats_for_histograms 0 -1 2 3 4

 eval range from 0 to 60m step 6m left_floats == right_floats
+expect no_info
 left_floats _ _ _ _ 3 _ _ _ _ Inf -Inf

 eval range from 0 to 60m step 6m left_floats == bool right_floats
+expect no_info
 {} 0 _ _ _ 1 _ 0 0 0 1 1

 eval range from 0 to 60m step 6m left_floats == does_not_match
+expect no_info
 # No results.

 eval range from 0 to 24m step 6m left_histograms == right_histograms
+expect no_info
 left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _

 eval range from 0 to 24m step 6m left_histograms == bool right_histograms
+expect no_info
 {} 1 0 _ _ _

-eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms == right_floats_for_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms
+expect info
 # No results.

 eval range from 0 to 60m step 6m left_floats != right_floats
+expect no_info
 left_floats 1 _ _ _ _ _ 4 5 NaN _ _

 eval range from 0 to 60m step 6m left_floats != bool right_floats
+expect no_info
 {} 1 _ _ _ 0 _ 1 1 1 0 0

 eval range from 0 to 24m step 6m left_histograms != right_histograms
+expect no_info
 left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _

 eval range from 0 to 24m step 6m left_histograms != bool right_histograms
+expect no_info
 {} 0 1 _ _ _

-eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms != right_floats_for_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms
+expect info
 # No results.

 eval range from 0 to 60m step 6m left_floats > right_floats
+expect no_info
 left_floats _ _ _ _ _ _ 4 _ _ _ _

 eval range from 0 to 60m step 6m left_floats > bool right_floats
+expect no_info
 {} 0 _ _ _ 0 _ 1 0 0 0 0

-eval_info range from 0 to 24m step 6m left_histograms > right_histograms
+eval range from 0 to 24m step 6m left_histograms > right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms
+eval range from 0 to 24m step 6m left_histograms > bool right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms > right_floats_for_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms
+expect info
 # No results.

 eval range from 0 to 60m step 6m left_floats >= right_floats
+expect no_info
 left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf

 eval range from 0 to 60m step 6m left_floats >= bool right_floats
+expect no_info
 {} 0 _ _ _ 1 _ 1 0 0 1 1

-eval_info range from 0 to 24m step 6m left_histograms >= right_histograms
+eval range from 0 to 24m step 6m left_histograms >= right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms
+eval range from 0 to 24m step 6m left_histograms >= bool right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms
+expect info
 # No results.

 eval range from 0 to 60m step 6m left_floats < right_floats
+expect no_info
 left_floats 1 _ _ _ _ _ _ 5 _ _ _

 eval range from 0 to 60m step 6m left_floats < bool right_floats
+expect no_info
 {} 1 _ _ _ 0 _ 0 1 0 0 0

-eval_info range from 0 to 24m step 6m left_histograms < right_histograms
+eval range from 0 to 24m step 6m left_histograms < right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms
+eval range from 0 to 24m step 6m left_histograms < bool right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms < right_floats_for_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms
+expect info
 # No results.

 eval range from 0 to 60m step 6m left_floats <= right_floats
+expect no_info
 left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf

 eval range from 0 to 60m step 6m left_floats <= bool right_floats
+expect no_info
 {} 1 _ _ _ 1 _ 0 1 0 1 1

-eval_info range from 0 to 24m step 6m left_histograms <= right_histograms
+eval range from 0 to 24m step 6m left_histograms <= right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms
+eval range from 0 to 24m step 6m left_histograms <= bool right_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms
+eval range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms
+expect info
 # No results.

 # Vector / scalar combinations with scalar on right side
 eval range from 0 to 60m step 6m left_floats == 3
+expect no_info
 left_floats _ _ _ _ 3 _ _ _ _ _ _

 eval range from 0 to 60m step 6m left_floats != 3
+expect no_info
 left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf

 eval range from 0 to 60m step 6m left_floats > 3
+expect no_info
 left_floats _ _ _ _ _ _ 4 5 _ Inf _

 eval range from 0 to 60m step 6m left_floats >= 3
+expect no_info
 left_floats _ _ _ _ 3 _ 4 5 _ Inf _

 eval range from 0 to 60m step 6m left_floats < 3
+expect no_info
 left_floats 1 2 _ _ _ _ _ _ _ _ -Inf

 eval range from 0 to 60m step 6m left_floats <= 3
+expect no_info
 left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf

 eval range from 0 to 60m step 6m left_floats == bool 3
+expect no_info
 {} 0 0 _ _ 1 _ 0 0 0 0 0

 eval range from 0 to 60m step 6m left_floats == Inf
+expect no_info
 left_floats _ _ _ _ _ _ _ _ _ Inf _

 eval range from 0 to 60m step 6m left_floats == bool Inf
+expect no_info
 {} 0 0 _ _ 0 _ 0 0 0 1 0

 eval range from 0 to 60m step 6m left_floats == NaN
+expect no_info
 # No results.

 eval range from 0 to 60m step 6m left_floats == bool NaN
+expect no_info
 {} 0 0 _ _ 0 _ 0 0 0 0 0

-eval_info range from 0 to 24m step 6m left_histograms == 3
+eval range from 0 to 24m step 6m left_histograms == 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms == 0
+eval range from 0 to 24m step 6m left_histograms == 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms != 3
+eval range from 0 to 24m step 6m left_histograms != 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms != 0
+eval range from 0 to 24m step 6m left_histograms != 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > 3
+eval range from 0 to 24m step 6m left_histograms > 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > 0
+eval range from 0 to 24m step 6m left_histograms > 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= 3
+eval range from 0 to 24m step 6m left_histograms >= 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= 0
+eval range from 0 to 24m step 6m left_histograms >= 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < 3
+eval range from 0 to 24m step 6m left_histograms < 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < 0
+eval range from 0 to 24m step 6m left_histograms < 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= 3
+eval range from 0 to 24m step 6m left_histograms <= 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= 0
+eval range from 0 to 24m step 6m left_histograms <= 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms == bool 3
+eval range from 0 to 24m step 6m left_histograms == bool 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms == bool 0
+eval range from 0 to 24m step 6m left_histograms == bool 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms != bool 3
+eval range from 0 to 24m step 6m left_histograms != bool 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms != bool 0
+eval range from 0 to 24m step 6m left_histograms != bool 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > bool 3
+eval range from 0 to 24m step 6m left_histograms > bool 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms > bool 0
+eval range from 0 to 24m step 6m left_histograms > bool 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= bool 3
+eval range from 0 to 24m step 6m left_histograms >= bool 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms >= bool 0
+eval range from 0 to 24m step 6m left_histograms >= bool 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < bool 3
+eval range from 0 to 24m step 6m left_histograms < bool 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms < bool 0
+eval range from 0 to 24m step 6m left_histograms < bool 0
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= bool 3
+eval range from 0 to 24m step 6m left_histograms <= bool 3
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m left_histograms <= bool 0
+eval range from 0 to 24m step 6m left_histograms <= bool 0
+expect info
 # No results.

 # Vector / scalar combinations with scalar on left side
 eval range from 0 to 60m step 6m 3 == left_floats
+expect no_info
 left_floats _ _ _ _ 3 _ _ _ _ _ _

 eval range from 0 to 60m step 6m 3 != left_floats
+expect no_info
 left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf

 eval range from 0 to 60m step 6m 3 < left_floats
+expect no_info
 left_floats _ _ _ _ _ _ 4 5 _ Inf _

 eval range from 0 to 60m step 6m 3 <= left_floats
+expect no_info
 left_floats _ _ _ _ 3 _ 4 5 _ Inf _

 eval range from 0 to 60m step 6m 3 > left_floats
+expect no_info
 left_floats 1 2 _ _ _ _ _ _ _ _ -Inf

 eval range from 0 to 60m step 6m 3 >= left_floats
+expect no_info
 left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf

 eval range from 0 to 60m step 6m 3 == bool left_floats
+expect no_info
 {} 0 0 _ _ 1 _ 0 0 0 0 0

 eval range from 0 to 60m step 6m Inf == left_floats
+expect no_info
 left_floats _ _ _ _ _ _ _ _ _ Inf _

 eval range from 0 to 60m step 6m Inf == bool left_floats
+expect no_info
 {} 0 0 _ _ 0 _ 0 0 0 1 0

 eval range from 0 to 60m step 6m NaN == left_floats
+expect no_info
+expect no_warn
 # No results.

 eval range from 0 to 60m step 6m NaN == bool left_floats
+expect no_info
 {} 0 0 _ _ 0 _ 0 0 0 0 0

-eval_info range from 0 to 24m step 6m 3 == left_histograms
+eval range from 0 to 24m step 6m 3 == left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 0 == left_histograms
+eval range from 0 to 24m step 6m 0 == left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 3 != left_histograms
+eval range from 0 to 24m step 6m 3 != left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 0 != left_histograms
+eval range from 0 to 24m step 6m 0 != left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 3 < left_histograms
+eval range from 0 to 24m step 6m 3 < left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 0 < left_histograms
+eval range from 0 to 24m step 6m 0 < left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 3 < left_histograms
+eval range from 0 to 24m step 6m 3 < left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 0 < left_histograms
+eval range from 0 to 24m step 6m 0 < left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 3 > left_histograms
+eval range from 0 to 24m step 6m 3 > left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 0 > left_histograms
+eval range from 0 to 24m step 6m 0 > left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 3 >= left_histograms
+eval range from 0 to 24m step 6m 3 >= left_histograms
+expect info
 # No results.

-eval_info range from 0 to 24m step 6m 0 >= left_histograms
+eval range from 0 to 24m step 6m 0 >= left_histograms
+expect info
 # No results.

 clear

@@ -156,4 +156,6 @@ load 5m
 foo 3+0x10

 eval instant at 12m min_over_time((topk(1, foo))[1m:5m])
+expect no_info
+expect no_warn
 #empty
@@ -127,7 +127,10 @@ func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error {
     go m.reloader()
     for {
         select {
-        case ts := <-tsets:
+        case ts, ok := <-tsets:
+            if !ok {
+                break
+            }
             m.updateTsets(ts)

             select {
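The `Run` loop now uses the comma-ok receive so it can notice when the discovery channel is closed instead of treating a zero-value receive as an update. One Go subtlety worth keeping in mind with this shape: inside a `select` that sits in a `for`, a bare `break` only leaves the `select`; terminating the surrounding loop takes a labeled `break` or a `return`. A self-contained illustration of the comma-ok idiom (a generic sketch, not this file's code):

package main

import "fmt"

func main() {
    tsets := make(chan map[string][]string)

    go func() {
        tsets <- map[string][]string{"job": {"target-1"}}
        close(tsets) // a closed channel makes every receive return ok == false
    }()

loop:
    for {
        select {
        case ts, ok := <-tsets:
            if !ok {
                // A bare break here would only exit the select and the
                // loop would spin on the closed channel; the label ends
                // the loop itself.
                break loop
            }
            fmt.Println("update:", ts)
        }
    }
    fmt.Println("channel closed, done")
}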
@@ -149,12 +149,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
         return nil, fmt.Errorf("error creating HTTP client: %w", err)
     }

-    validationScheme, err := config.ToValidationScheme(cfg.MetricNameValidationScheme)
-    if err != nil {
-        return nil, fmt.Errorf("invalid metric name validation scheme: %w", err)
-    }
     var escapingScheme model.EscapingScheme
-    escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, validationScheme)
+    escapingScheme, err = config.ToEscapingScheme(cfg.MetricNameEscapingScheme, cfg.MetricNameValidationScheme)
     if err != nil {
         return nil, fmt.Errorf("invalid metric name escaping scheme, %w", err)
     }
@@ -172,7 +168,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
         logger:   logger,
         metrics:  metrics,
         httpOpts: options.HTTPClientOptions,
-        validationScheme: validationScheme,
+        validationScheme: cfg.MetricNameValidationScheme,
         escapingScheme:   escapingScheme,
     }
     sp.newLoop = func(opts scrapeLoopOptions) loop {
@@ -325,11 +321,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
     sp.config = cfg
     oldClient := sp.client
     sp.client = client
-    validationScheme, err := config.ToValidationScheme(cfg.MetricNameValidationScheme)
-    if err != nil {
-        return fmt.Errorf("invalid metric name validation scheme: %w", err)
-    }
-    sp.validationScheme = validationScheme
+    sp.validationScheme = cfg.MetricNameValidationScheme
     var escapingScheme model.EscapingScheme
     escapingScheme, err = model.ToEscapingScheme(cfg.MetricNameEscapingScheme)
     if err != nil {
@@ -84,7 +84,7 @@ func TestNewScrapePool(t *testing.T) {
     var (
         app = &nopAppendable{}
         cfg = &config.ScrapeConfig{
-            MetricNameValidationScheme: config.UTF8ValidationConfig,
+            MetricNameValidationScheme: model.UTF8Validation,
             MetricNameEscapingScheme:   model.AllowUTF8,
         }
         sp, err = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
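This and the following test hunks all make the same substitution: `MetricNameValidationScheme` now holds the prometheus/common `model.ValidationScheme` value directly (`model.UTF8Validation`) instead of the old config-package constant, which is what let `newScrapePool` and `reload` above drop the `config.ToValidationScheme` conversion and its error path. A hedged sketch of the resulting construction (the helper name and surrounding fields are illustrative):

package example

import (
    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/config"
)

// newUTF8ScrapeConfig returns a config in the post-patch shape: the
// validation scheme is the model type itself, so callers can use it
// without a conversion step that might fail.
func newUTF8ScrapeConfig() *config.ScrapeConfig {
    return &config.ScrapeConfig{
        MetricNameValidationScheme: model.UTF8Validation, // was config.UTF8ValidationConfig
        MetricNameEscapingScheme:   model.AllowUTF8,
    }
}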
@@ -327,7 +327,7 @@ func TestDroppedTargetsList(t *testing.T) {
     cfg = &config.ScrapeConfig{
         JobName:        "dropMe",
         ScrapeInterval: model.Duration(1),
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
         RelabelConfigs: []*relabel.Config{
             {
@@ -374,7 +374,7 @@ func TestDiscoveredLabelsUpdate(t *testing.T) {
     sp.config = &config.ScrapeConfig{
         ScrapeInterval: model.Duration(1),
         ScrapeTimeout:  model.Duration(1),
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     }
     sp.activeTargets = make(map[uint64]*Target)
@@ -506,7 +506,7 @@ func TestScrapePoolReload(t *testing.T) {
     reloadCfg := &config.ScrapeConfig{
         ScrapeInterval: model.Duration(3 * time.Second),
         ScrapeTimeout:  model.Duration(2 * time.Second),
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     }
     // On starting to run, new loops created on reload check whether their preceding
@@ -600,7 +600,7 @@ func TestScrapePoolReloadPreserveRelabeledIntervalTimeout(t *testing.T) {
     reloadCfg := &config.ScrapeConfig{
         ScrapeInterval: model.Duration(3 * time.Second),
         ScrapeTimeout:  model.Duration(2 * time.Second),
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     }
     newLoop := func(opts scrapeLoopOptions) loop {
@@ -701,7 +701,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
     require.NoError(t, sp.reload(&config.ScrapeConfig{
         ScrapeInterval: model.Duration(3 * time.Second),
         ScrapeTimeout:  model.Duration(2 * time.Second),
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
         TargetLimit:    l,
     }))
@@ -791,7 +791,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {

 func TestScrapePoolAppender(t *testing.T) {
     cfg := &config.ScrapeConfig{
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     }
     app := &nopAppendable{}
@@ -869,7 +869,7 @@ func TestScrapePoolRaces(t *testing.T) {
     return &config.ScrapeConfig{
         ScrapeInterval: interval,
         ScrapeTimeout:  timeout,
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     }
 }
@@ -943,7 +943,7 @@ func TestScrapePoolScrapeLoopsStarted(t *testing.T) {
     require.NoError(t, sp.reload(&config.ScrapeConfig{
         ScrapeInterval: model.Duration(3 * time.Second),
         ScrapeTimeout:  model.Duration(2 * time.Second),
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     }))
     sp.Sync(tgs)
@@ -3621,14 +3621,14 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeTimeout:  model.Duration(5 * time.Second),
     ScrapeInterval: model.Duration(5 * time.Second),
     MetricsPath:    "/metrics",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 }
 sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
 t1 = &Target{
     labels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
     scrapeConfig: &config.ScrapeConfig{
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
     },
 }
@@ -3648,7 +3648,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeInterval: model.Duration(5 * time.Second),
     ScrapeTimeout:  model.Duration(5 * time.Second),
     MetricsPath:    "/metrics",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3659,7 +3659,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeInterval: model.Duration(5 * time.Second),
     ScrapeTimeout:  model.Duration(15 * time.Second),
     MetricsPath:    "/metrics2",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3671,7 +3671,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeInterval: model.Duration(5 * time.Second),
     ScrapeTimeout:  model.Duration(15 * time.Second),
     MetricsPath:    "/metrics2",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3684,7 +3684,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeInterval: model.Duration(5 * time.Second),
     ScrapeTimeout:  model.Duration(15 * time.Second),
     MetricsPath:    "/metrics2",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3700,7 +3700,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeInterval: model.Duration(5 * time.Second),
     ScrapeTimeout:  model.Duration(15 * time.Second),
     MetricsPath:    "/metrics2",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3714,7 +3714,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeInterval: model.Duration(5 * time.Second),
     ScrapeTimeout:  model.Duration(15 * time.Second),
     MetricsPath:    "/metrics2",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3726,7 +3726,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeTimeout: model.Duration(15 * time.Second),
     MetricsPath:   "/metrics",
     LabelLimit:    1,
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3738,7 +3738,7 @@ func TestReuseScrapeCache(t *testing.T) {
     ScrapeTimeout: model.Duration(15 * time.Second),
     MetricsPath:   "/metrics",
     LabelLimit:    15,
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3751,7 +3751,7 @@ func TestReuseScrapeCache(t *testing.T) {
     MetricsPath:          "/metrics",
     LabelLimit:           15,
     LabelNameLengthLimit: 5,
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3765,7 +3765,7 @@ func TestReuseScrapeCache(t *testing.T) {
     LabelLimit:            15,
     LabelNameLengthLimit:  5,
     LabelValueLengthLimit: 7,
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 },
 },
@@ -3830,7 +3830,7 @@ func TestReuseCacheRace(t *testing.T) {
     ScrapeTimeout:  model.Duration(5 * time.Second),
     ScrapeInterval: model.Duration(5 * time.Second),
     MetricsPath:    "/metrics",
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 }
 buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
@@ -3854,7 +3854,7 @@ func TestReuseCacheRace(t *testing.T) {
     ScrapeInterval: model.Duration(1 * time.Millisecond),
     MetricsPath:    "/metrics",
     SampleLimit:    i,
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 })
 }
@@ -3932,7 +3932,7 @@ func TestScrapeReportLimit(t *testing.T) {
     Scheme:         "http",
     ScrapeInterval: model.Duration(100 * time.Millisecond),
     ScrapeTimeout:  model.Duration(100 * time.Millisecond),
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 }

@@ -3988,7 +3988,7 @@ func TestScrapeUTF8(t *testing.T) {
     Scheme:         "http",
     ScrapeInterval: model.Duration(100 * time.Millisecond),
     ScrapeTimeout:  model.Duration(100 * time.Millisecond),
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 }
 ts, scrapedTwice := newScrapableServer("{\"with.dots\"} 42\n")
@@ -4124,7 +4124,7 @@ func TestTargetScrapeIntervalAndTimeoutRelabel(t *testing.T) {
     config := &config.ScrapeConfig{
         ScrapeInterval: interval,
         ScrapeTimeout:  timeout,
-        MetricNameValidationScheme: config.UTF8ValidationConfig,
+        MetricNameValidationScheme: model.UTF8Validation,
         MetricNameEscapingScheme:   model.AllowUTF8,
         RelabelConfigs: []*relabel.Config{
             {
@@ -4186,7 +4186,7 @@ func TestLeQuantileReLabel(t *testing.T) {
     Scheme:         "http",
     ScrapeInterval: model.Duration(100 * time.Millisecond),
     ScrapeTimeout:  model.Duration(100 * time.Millisecond),
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 }

@@ -4695,10 +4695,10 @@ metric: <
     SampleLimit:    100,
     Scheme:         "http",
     ScrapeInterval: model.Duration(50 * time.Millisecond),
-    ScrapeTimeout:  model.Duration(25 * time.Millisecond),
+    ScrapeTimeout:  model.Duration(49 * time.Millisecond),
     AlwaysScrapeClassicHistograms:  tc.alwaysScrapeClassicHistograms,
     ConvertClassicHistogramsToNHCB: tc.convertClassicHistToNHCB,
-    MetricNameValidationScheme: config.UTF8ValidationConfig,
+    MetricNameValidationScheme: model.UTF8Validation,
     MetricNameEscapingScheme:   model.AllowUTF8,
 }

|
@ -4747,7 +4747,7 @@ metric: <
|
||||||
Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
|
Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
require.Len(t, sp.ActiveTargets(), 1)
|
require.Eventually(t, func() bool { return len(sp.ActiveTargets()) == 1 }, 5*time.Second, 50*time.Millisecond)
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-time.After(5 * time.Second):
|
case <-time.After(5 * time.Second):
|
||||||
|
@ -4826,7 +4826,7 @@ func TestTypeUnitReLabel(t *testing.T) {
|
||||||
Scheme: "http",
|
Scheme: "http",
|
||||||
ScrapeInterval: model.Duration(100 * time.Millisecond),
|
ScrapeInterval: model.Duration(100 * time.Millisecond),
|
||||||
ScrapeTimeout: model.Duration(100 * time.Millisecond),
|
ScrapeTimeout: model.Duration(100 * time.Millisecond),
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4967,7 +4967,7 @@ func TestScrapeLoopCompression(t *testing.T) {
|
||||||
ScrapeInterval: model.Duration(100 * time.Millisecond),
|
ScrapeInterval: model.Duration(100 * time.Millisecond),
|
||||||
ScrapeTimeout: model.Duration(100 * time.Millisecond),
|
ScrapeTimeout: model.Duration(100 * time.Millisecond),
|
||||||
EnableCompression: tc.enableCompression,
|
EnableCompression: tc.enableCompression,
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5123,7 +5123,7 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
|
||||||
model.AddressLabel, serverURL.Host,
|
model.AddressLabel, serverURL.Host,
|
||||||
),
|
),
|
||||||
scrapeConfig: &config.ScrapeConfig{
|
scrapeConfig: &config.ScrapeConfig{
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
Params: url.Values{"count": []string{strconv.Itoa(scenario.metricsCount)}},
|
Params: url.Values{"count": []string{strconv.Itoa(scenario.metricsCount)}},
|
||||||
},
|
},
|
||||||
|
@ -5379,7 +5379,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
|
||||||
JobName: jobName,
|
JobName: jobName,
|
||||||
Scheme: httpScheme,
|
Scheme: httpScheme,
|
||||||
MetricsPath: expectedPath,
|
MetricsPath: expectedPath,
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
},
|
},
|
||||||
targets: []*targetgroup.Group{
|
targets: []*targetgroup.Group{
|
||||||
|
@ -5398,7 +5398,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
|
||||||
JobName: jobName,
|
JobName: jobName,
|
||||||
Scheme: httpScheme,
|
Scheme: httpScheme,
|
||||||
MetricsPath: secondPath,
|
MetricsPath: secondPath,
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
Params: url.Values{"param": []string{secondParam}},
|
Params: url.Values{"param": []string{secondParam}},
|
||||||
},
|
},
|
||||||
|
@ -5423,7 +5423,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
|
||||||
JobName: jobName,
|
JobName: jobName,
|
||||||
Scheme: httpScheme,
|
Scheme: httpScheme,
|
||||||
MetricsPath: secondPath,
|
MetricsPath: secondPath,
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
Params: url.Values{"param": []string{secondParam}},
|
Params: url.Values{"param": []string{secondParam}},
|
||||||
RelabelConfigs: []*relabel.Config{
|
RelabelConfigs: []*relabel.Config{
|
||||||
|
@ -5504,7 +5504,7 @@ func TestScrapePoolScrapeAfterReload(t *testing.T) {
|
||||||
Scheme: "http",
|
Scheme: "http",
|
||||||
ScrapeInterval: model.Duration(100 * time.Millisecond),
|
ScrapeInterval: model.Duration(100 * time.Millisecond),
|
||||||
ScrapeTimeout: model.Duration(100 * time.Millisecond),
|
ScrapeTimeout: model.Duration(100 * time.Millisecond),
|
||||||
MetricNameValidationScheme: config.UTF8ValidationConfig,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
EnableCompression: false,
|
EnableCompression: false,
|
||||||
ServiceDiscoveryConfigs: discovery.Configs{
|
ServiceDiscoveryConfigs: discovery.Configs{
|
||||||
|
|
|
@@ -25,15 +25,17 @@ jobs:
     steps:
       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
       - name: Install Go
-        uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
+        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
         with:
           go-version: 1.24.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
      - name: Lint
-        uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0
+        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
         with:
           args: --verbose
-          version: v2.1.5
+          version: v2.2.1
@@ -223,6 +223,19 @@ type SelectHints struct {
 	// When disabled, the result may contain samples outside the queried time range but Select() performances
 	// may be improved.
 	DisableTrimming bool

+	// Projection hints. They are currently unused in the Prometheus promql engine but can be used by different
+	// implementations of the Queryable interface and engines.
+	// These hints are useful for queries like `sum by (label) (rate(metric[5m]))` - we can safely evaluate it
+	// even if we only fetch the `label` label. For some storage implementations this is beneficial.
+
+	// ProjectionLabels are the minimum amount of labels required to be fetched for this Select call.
+	// When honored it is required to add an __series_hash__ label containing the hash of all labels
+	// of a particular series so that the engine can still perform horizontal joins.
+	ProjectionLabels []string
+
+	// ProjectionInclude defines if we have to include or exclude the labels from the ProjectionLabels field.
+	ProjectionInclude bool
 }

 // LabelHints specifies hints passed for label reads.
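The new projection hints are unused by the built-in PromQL engine for now, so here is a minimal sketch of how a third-party Queryable might honor them, assuming the `__series_hash__` contract described in the comment above; `projectLabels` and the hashing choice are illustrative, not part of the Prometheus API.

package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// projectLabels is a hypothetical helper: it keeps (or drops) the hinted
// labels and attaches a __series_hash__ label computed over the full label
// set, so the engine can still join series on their original identity.
func projectLabels(full labels.Labels, hints *storage.SelectHints) labels.Labels {
	if hints == nil || len(hints.ProjectionLabels) == 0 {
		return full
	}
	listed := map[string]bool{}
	for _, name := range hints.ProjectionLabels {
		listed[name] = true
	}
	b := labels.NewBuilder(labels.EmptyLabels())
	full.Range(func(l labels.Label) {
		// ProjectionInclude == true keeps exactly the listed labels;
		// false keeps everything except the listed labels.
		if listed[l.Name] == hints.ProjectionInclude {
			b.Set(l.Name, l.Value)
		}
	})
	// Hash over all original labels, not just the projected subset.
	b.Set("__series_hash__", strconv.FormatUint(full.Hash(), 10))
	return b.Labels()
}

func main() {
	full := labels.FromStrings("__name__", "metric", "label", "a", "pod", "p1")
	hints := &storage.SelectHints{ProjectionLabels: []string{"label"}, ProjectionInclude: true}
	fmt.Println(projectLabels(full, hints)) // {__series_hash__="…", label="a"}
}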
@@ -549,6 +549,7 @@ type chunkedSeriesSet struct {

 	current storage.Series
 	err error
+	exhausted bool
 }

 func NewChunkedSeriesSet(chunkedReader *ChunkedReader, respBody io.ReadCloser, mint, maxt int64, cancel func(error)) storage.SeriesSet {

@@ -564,6 +565,12 @@ func NewChunkedSeriesSet(chunkedReader *ChunkedReader, respBody io.ReadCloser, m
 // Next return true if there is a next series and false otherwise. It will
 // block until the next series is available.
 func (s *chunkedSeriesSet) Next() bool {
+	if s.exhausted {
+		// Don't try to read the next series again.
+		// This prevents errors like "http: read on closed response body" if Next() is called after it has already returned false.
+		return false
+	}
+
 	res := &prompb.ChunkedReadResponse{}

 	err := s.chunkedReader.NextProto(res)

@@ -575,6 +582,7 @@ func (s *chunkedSeriesSet) Next() bool {

 	_ = s.respBody.Close()
 	s.cancel(err)
+	s.exhausted = true

 	return false
 }
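The `exhausted` field is a general iterator latch: once the terminal state is reached, every later Next() must be a no-op rather than a re-read of a closed source. A self-contained sketch of the same pattern, with hypothetical names:

package main

import "fmt"

// exhaustedIter mirrors the guard the diff adds to chunkedSeriesSet: when the
// source ends (or errors), a boolean latch makes all subsequent Next() calls
// return false immediately instead of touching the finished source again.
type exhaustedIter struct {
	items     []string
	pos       int
	exhausted bool
}

func (it *exhaustedIter) Next() bool {
	if it.exhausted {
		// Never re-read a source we already finished with.
		return false
	}
	if it.pos >= len(it.items) {
		it.exhausted = true
		return false
	}
	it.pos++
	return true
}

func main() {
	it := &exhaustedIter{items: []string{"a"}}
	fmt.Println(it.Next(), it.Next(), it.Next()) // true false false
}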
@@ -15,6 +15,7 @@ package remote

 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"sync"

@@ -892,7 +893,8 @@ func TestChunkedSeriesSet(t *testing.T) {
 	flusher := &mockFlusher{}

 	w := NewChunkedWriter(buf, flusher)
-	r := NewChunkedReader(buf, config.DefaultChunkedReadLimit, nil)
+	wrappedReader := newOneShotCloser(buf)
+	r := NewChunkedReader(wrappedReader, config.DefaultChunkedReadLimit, nil)

 	chks := buildTestChunks(t)
 	l := []prompb.Label{

@@ -913,7 +915,7 @@ func TestChunkedSeriesSet(t *testing.T) {
 		require.NoError(t, err)
 	}

-	ss := NewChunkedSeriesSet(r, io.NopCloser(buf), 0, 14000, func(error) {})
+	ss := NewChunkedSeriesSet(r, wrappedReader, 0, 14000, func(error) {})
 	require.NoError(t, ss.Err())
 	require.Nil(t, ss.Warnings())

@@ -938,6 +940,9 @@ func TestChunkedSeriesSet(t *testing.T) {
 	}
 	require.Equal(t, numTestChunks, numResponses)
 	require.NoError(t, ss.Err())

+	require.False(t, ss.Next(), "Next() should still return false after it previously returned false")
+	require.NoError(t, ss.Err(), "Err() should not return an error if Next() is called again after it previously returned false")
 	})

 	t.Run("chunked reader error", func(t *testing.T) {

@@ -983,6 +988,32 @@ type mockFlusher struct{}

 func (f *mockFlusher) Flush() {}

+type oneShotCloser struct {
+	r io.Reader
+	closed bool
+}
+
+func newOneShotCloser(r io.Reader) io.ReadCloser {
+	return &oneShotCloser{r, false}
+}
+
+func (c *oneShotCloser) Read(p []byte) (n int, err error) {
+	if c.closed {
+		return 0, errors.New("already closed")
+	}
+
+	return c.r.Read(p)
+}
+
+func (c *oneShotCloser) Close() error {
+	if c.closed {
+		return errors.New("already closed")
+	}
+
+	c.closed = true
+	return nil
+}
+
 const (
 	numTestChunks = 3
 	numSamplesPerTestChunk = 5
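For illustration, the new test helper can be exercised on its own; this usage sketch (outside the test file, helper names unchanged) shows the behavior the added assertions rely on — any Read or second Close after the first Close must fail, which is how the test would surface "read on closed response body" style bugs:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

type oneShotCloser struct {
	r      io.Reader
	closed bool
}

func (c *oneShotCloser) Read(p []byte) (int, error) {
	if c.closed {
		return 0, errors.New("already closed")
	}
	return c.r.Read(p)
}

func (c *oneShotCloser) Close() error {
	if c.closed {
		return errors.New("already closed")
	}
	c.closed = true
	return nil
}

func main() {
	c := &oneShotCloser{r: strings.NewReader("data")}
	buf := make([]byte, 4)
	n, _ := c.Read(buf)
	// Arguments evaluate left to right: first Close succeeds, second fails.
	fmt.Println(n, c.Close(), c.Close()) // 4 <nil> already closed
}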
@@ -25,6 +25,7 @@ import (
 	"slices"
 	"sort"
 	"strconv"
+	"time"
 	"unicode/utf8"

 	"github.com/cespare/xxhash/v2"

@@ -57,6 +58,7 @@ const (
 	spanIDKey = "span_id"
 	infoType = "info"
 	targetMetricName = "target_info"
+	defaultLookbackDelta = 5 * time.Minute
 )

 type bucketBoundsData struct {

@@ -149,11 +151,9 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting

 	// map ensures no duplicate label names.
 	l := make(map[string]string, maxLabelCount)
+	labelNamer := otlptranslator.LabelNamer{UTF8Allowed: settings.AllowUTF8}
 	for _, label := range labels {
-		finalKey := label.Name
-		if !settings.AllowUTF8 {
-			finalKey = otlptranslator.NormalizeLabel(finalKey)
-		}
+		finalKey := labelNamer.Build(label.Name)
 		if existingValue, alreadyExists := l[finalKey]; alreadyExists {
 			l[finalKey] = existingValue + ";" + label.Value
 		} else {

@@ -162,10 +162,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 	}

 	for _, lbl := range promotedAttrs {
-		normalized := lbl.Name
-		if !settings.AllowUTF8 {
-			normalized = otlptranslator.NormalizeLabel(normalized)
-		}
+		normalized := labelNamer.Build(lbl.Name)
 		if _, exists := l[normalized]; !exists {
 			l[normalized] = lbl.Value
 		}

@@ -203,8 +200,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 			log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
 		}
 		// internal labels should be maintained.
-		if !settings.AllowUTF8 && (len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__") {
-			name = otlptranslator.NormalizeLabel(name)
+		if len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__" {
+			name = labelNamer.Build(name)
 		}
 		l[name] = extras[i+1]
 	}

@@ -404,39 +401,49 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes,
 	return promExemplars, nil
 }

-// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics.
-func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp {
-	var ts pcommon.Timestamp
+// findMinAndMaxTimestamps returns the minimum of minTimestamp and the earliest timestamp in metric and
+// the maximum of maxTimestamp and the latest timestamp in metric, respectively.
+func findMinAndMaxTimestamps(metric pmetric.Metric, minTimestamp, maxTimestamp pcommon.Timestamp) (pcommon.Timestamp, pcommon.Timestamp) {
 	// handle individual metric based on type
 	//exhaustive:enforce
 	switch metric.Type() {
 	case pmetric.MetricTypeGauge:
 		dataPoints := metric.Gauge().DataPoints()
 		for x := 0; x < dataPoints.Len(); x++ {
-			ts = max(ts, dataPoints.At(x).Timestamp())
+			ts := dataPoints.At(x).Timestamp()
+			minTimestamp = min(minTimestamp, ts)
+			maxTimestamp = max(maxTimestamp, ts)
 		}
 	case pmetric.MetricTypeSum:
 		dataPoints := metric.Sum().DataPoints()
 		for x := 0; x < dataPoints.Len(); x++ {
-			ts = max(ts, dataPoints.At(x).Timestamp())
+			ts := dataPoints.At(x).Timestamp()
+			minTimestamp = min(minTimestamp, ts)
+			maxTimestamp = max(maxTimestamp, ts)
 		}
 	case pmetric.MetricTypeHistogram:
 		dataPoints := metric.Histogram().DataPoints()
 		for x := 0; x < dataPoints.Len(); x++ {
-			ts = max(ts, dataPoints.At(x).Timestamp())
+			ts := dataPoints.At(x).Timestamp()
+			minTimestamp = min(minTimestamp, ts)
+			maxTimestamp = max(maxTimestamp, ts)
 		}
 	case pmetric.MetricTypeExponentialHistogram:
 		dataPoints := metric.ExponentialHistogram().DataPoints()
 		for x := 0; x < dataPoints.Len(); x++ {
-			ts = max(ts, dataPoints.At(x).Timestamp())
+			ts := dataPoints.At(x).Timestamp()
+			minTimestamp = min(minTimestamp, ts)
+			maxTimestamp = max(maxTimestamp, ts)
 		}
 	case pmetric.MetricTypeSummary:
 		dataPoints := metric.Summary().DataPoints()
 		for x := 0; x < dataPoints.Len(); x++ {
-			ts = max(ts, dataPoints.At(x).Timestamp())
+			ts := dataPoints.At(x).Timestamp()
+			minTimestamp = min(minTimestamp, ts)
+			maxTimestamp = max(maxTimestamp, ts)
 		}
 	}
-	return ts
+	return minTimestamp, maxTimestamp
 }

 func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource,

@@ -569,8 +576,8 @@ func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTi
 }

 // addResourceTargetInfo converts the resource to the target info metric.
-func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, converter *PrometheusConverter) {
-	if settings.DisableTargetInfo || timestamp == 0 {
+func addResourceTargetInfo(resource pcommon.Resource, settings Settings, earliestTimestamp, latestTimestamp time.Time, converter *PrometheusConverter) {
+	if settings.DisableTargetInfo {
 		return
 	}

@@ -616,12 +623,27 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
 		return
 	}

-	sample := &prompb.Sample{
-		Value: float64(1),
-		// convert ns to ms
-		Timestamp: convertTimeStamp(timestamp),
+	// Generate target_info samples starting at earliestTimestamp and ending at latestTimestamp,
+	// with a sample at every interval between them.
+	// Use an interval corresponding to half of the lookback delta, to ensure that target_info samples are found
+	// for the entirety of the relevant period.
+	if settings.LookbackDelta == 0 {
+		settings.LookbackDelta = defaultLookbackDelta
+	}
+	interval := settings.LookbackDelta / 2
+	ts, _ := converter.getOrCreateTimeSeries(labels)
+	for timestamp := earliestTimestamp; timestamp.Before(latestTimestamp); timestamp = timestamp.Add(interval) {
+		ts.Samples = append(ts.Samples, prompb.Sample{
+			Value: float64(1),
+			Timestamp: timestamp.UnixMilli(),
+		})
+	}
+	if len(ts.Samples) == 0 || ts.Samples[len(ts.Samples)-1].Timestamp < latestTimestamp.UnixMilli() {
+		ts.Samples = append(ts.Samples, prompb.Sample{
+			Value: float64(1),
+			Timestamp: latestTimestamp.UnixMilli(),
+		})
 	}
-	converter.addSample(sample, labels)
 }

 // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms.
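The sampling schedule introduced in addResourceTargetInfo is easiest to see in isolation. A small sketch that mirrors the same arithmetic under the diff's 5-minute default lookback delta; `targetInfoTimestamps` is an illustrative name, not a function from the codebase:

package main

import (
	"fmt"
	"time"
)

// targetInfoTimestamps emits one timestamp every lookbackDelta/2 from the
// earliest to the latest metric timestamp, plus a final sample at latest if
// the loop didn't land exactly on it — the same shape as the diff's loop.
func targetInfoTimestamps(earliest, latest time.Time, lookbackDelta time.Duration) []int64 {
	if lookbackDelta == 0 {
		lookbackDelta = 5 * time.Minute // the diff's defaultLookbackDelta
	}
	interval := lookbackDelta / 2
	var out []int64
	for ts := earliest; ts.Before(latest); ts = ts.Add(interval) {
		out = append(out, ts.UnixMilli())
	}
	if len(out) == 0 || out[len(out)-1] < latest.UnixMilli() {
		out = append(out, latest.UnixMilli())
	}
	return out
}

func main() {
	start := time.UnixMilli(0)
	// Samples spanning 5m with the default delta yield timestamps at
	// 0m, 2m30s, and 5m (the last one appended explicitly).
	fmt.Println(targetInfoTimestamps(start, start.Add(5*time.Minute), 0))
}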
@@ -20,7 +20,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math"
 	"sort"
+	"time"

 	"github.com/prometheus/otlptranslator"
 	"go.opentelemetry.io/collector/pdata/pcommon"

@@ -48,6 +50,8 @@ type Settings struct {
 	KeepIdentifyingResourceAttributes bool
 	ConvertHistogramsToNHCB bool
 	AllowDeltaTemporality bool
+	// LookbackDelta is the PromQL engine lookback delta.
+	LookbackDelta time.Duration
 }

 // PrometheusConverter converts from OTel write format to Prometheus remote write format.

@@ -113,9 +117,10 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 		resourceMetrics := resourceMetricsSlice.At(i)
 		resource := resourceMetrics.Resource()
 		scopeMetricsSlice := resourceMetrics.ScopeMetrics()
-		// keep track of the most recent timestamp in the ResourceMetrics for
+		// keep track of the earliest and latest timestamp in the ResourceMetrics for
 		// use with the "target" info metric
-		var mostRecentTimestamp pcommon.Timestamp
+		earliestTimestamp := pcommon.Timestamp(math.MaxUint64)
+		latestTimestamp := pcommon.Timestamp(0)
 		for j := 0; j < scopeMetricsSlice.Len(); j++ {
 			metricSlice := scopeMetricsSlice.At(j).Metrics()

@@ -127,7 +132,7 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 			}

 			metric := metricSlice.At(k)
-			mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric))
+			earliestTimestamp, latestTimestamp = findMinAndMaxTimestamps(metric, earliestTimestamp, latestTimestamp)
 			temporality, hasTemporality, err := aggregationTemporality(metric)
 			if err != nil {
 				errs = multierr.Append(errs, err)

@@ -242,7 +247,11 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 			}
 		}
 	}
-		addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
+		if earliestTimestamp < pcommon.Timestamp(math.MaxUint64) {
+			// We have at least one metric sample for this resource.
+			// Generate a corresponding target_info series.
+			addResourceTargetInfo(resource, settings, earliestTimestamp.AsTime(), latestTimestamp.AsTime(), c)
+		}
 	}

 	return annots, errs
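Seeding earliestTimestamp with math.MaxUint64 is a sentinel trick: min() accumulation then works unconditionally, and an untouched sentinel afterwards means no sample was seen for the resource. A minimal illustration, assuming Go 1.21's min/max builtins:

package main

import (
	"fmt"
	"math"
)

// minMax accumulates the earliest and latest timestamps in one pass.
// ok is false when no sample was seen: earliest still holds the sentinel,
// which is exactly the condition FromMetrics checks before emitting target_info.
func minMax(timestamps []uint64) (earliest, latest uint64, ok bool) {
	earliest = math.MaxUint64
	for _, ts := range timestamps {
		earliest = min(earliest, ts)
		latest = max(latest, ts)
	}
	return earliest, latest, earliest != math.MaxUint64
}

func main() {
	fmt.Println(minMax([]uint64{300, 100, 200})) // 100 300 true
	fmt.Println(minMax(nil))                     // 18446744073709551615 0 false
}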
@@ -275,6 +275,100 @@ func TestFromMetrics(t *testing.T) {
 			"histogram data point has zero count, but non-zero sum: 155.000000",
 		}, ws)
 	})

+	t.Run("target_info's samples starts at the earliest metric sample timestamp and ends at the latest sample timestamp of the corresponding resource, with one sample every lookback delta/2 timestamps between", func(t *testing.T) {
+		request := pmetricotlp.NewExportRequest()
+		rm := request.Metrics().ResourceMetrics().AppendEmpty()
+		generateAttributes(rm.Resource().Attributes(), "resource", 5)
+
+		// Fake some resource attributes.
+		for k, v := range map[string]string{
+			"service.name": "test-service",
+			"service.namespace": "test-namespace",
+			"service.instance.id": "id1234",
+		} {
+			rm.Resource().Attributes().PutStr(k, v)
+		}
+		metrics := rm.ScopeMetrics().AppendEmpty().Metrics()
+		ts := pcommon.NewTimestampFromTime(time.Now())
+
+		var expMetadata []prompb.MetricMetadata
+		for i := range 3 {
+			m := metrics.AppendEmpty()
+			m.SetEmptyGauge()
+			m.SetName(fmt.Sprintf("gauge-%v", i+1))
+			m.SetDescription("gauge")
+			m.SetUnit("unit")
+			// Add samples every lookback delta / 4 timestamps.
+			curTs := ts.AsTime()
+			for range 6 {
+				point := m.Gauge().DataPoints().AppendEmpty()
+				point.SetTimestamp(pcommon.NewTimestampFromTime(curTs))
+				point.SetDoubleValue(1.23)
+				generateAttributes(point.Attributes(), "series", 2)
+				curTs = curTs.Add(defaultLookbackDelta / 4)
+			}
+
+			namer := otlptranslator.MetricNamer{}
+			expMetadata = append(expMetadata, prompb.MetricMetadata{
+				Type: otelMetricTypeToPromMetricType(m),
+				MetricFamilyName: namer.Build(TranslatorMetricFromOtelMetric(m)),
+				Help: m.Description(),
+				Unit: m.Unit(),
+			})
+		}
+
+		converter := NewPrometheusConverter()
+		annots, err := converter.FromMetrics(
+			context.Background(),
+			request.Metrics(),
+			Settings{
+				LookbackDelta: defaultLookbackDelta,
+			},
+		)
+		require.NoError(t, err)
+		require.Empty(t, annots)
+
+		testutil.RequireEqual(t, expMetadata, converter.Metadata())
+
+		timeSeries := converter.TimeSeries()
+		tgtInfoCount := 0
+		for _, s := range timeSeries {
+			b := labels.NewScratchBuilder(2)
+			lbls := s.ToLabels(&b, nil)
+			if lbls.Get(labels.MetricName) != "target_info" {
+				continue
+			}
+
+			tgtInfoCount++
+			require.Equal(t, "test-namespace/test-service", lbls.Get("job"))
+			require.Equal(t, "id1234", lbls.Get("instance"))
+			require.False(t, lbls.Has("service_name"))
+			require.False(t, lbls.Has("service_namespace"))
+			require.False(t, lbls.Has("service_instance_id"))
+			// There should be a target_info sample at the earliest metric timestamp, then two spaced lookback delta/2 apart,
+			// then one at the latest metric timestamp.
+			testutil.RequireEqual(t, []prompb.Sample{
+				{
+					Value: 1,
+					Timestamp: ts.AsTime().UnixMilli(),
+				},
+				{
+					Value: 1,
+					Timestamp: ts.AsTime().Add(defaultLookbackDelta / 2).UnixMilli(),
+				},
+				{
+					Value: 1,
+					Timestamp: ts.AsTime().Add(defaultLookbackDelta).UnixMilli(),
+				},
+				{
+					Value: 1,
+					Timestamp: ts.AsTime().Add(defaultLookbackDelta + defaultLookbackDelta/4).UnixMilli(),
+				},
+			}, s.Samples)
+		}
+		require.Equal(t, 1, tgtInfoCount)
+	})
 }

 func TestTemporality(t *testing.T) {
@@ -530,6 +530,9 @@ type OTLPOptions struct {
 	// marking the metric type as unknown for now).
 	// We're in an early phase of implementing delta support (proposal: https://github.com/prometheus/proposals/pull/48/)
 	NativeDelta bool
+	// LookbackDelta is the query lookback delta.
+	// Used to calculate the target_info sample timestamp interval.
+	LookbackDelta time.Duration
 }

 // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and

@@ -547,6 +550,7 @@ func NewOTLPWriteHandler(logger *slog.Logger, _ prometheus.Registerer, appendabl
 		},
 		config: configFunc,
 		allowDeltaTemporality: opts.NativeDelta,
+		lookbackDelta: opts.LookbackDelta,
 	}

 	wh := &otlpWriteHandler{logger: logger, defaultConsumer: ex}

@@ -583,19 +587,22 @@ type rwExporter struct {
 	*writeHandler
 	config func() config.Config
 	allowDeltaTemporality bool
+	lookbackDelta time.Duration
 }

 func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
 	otlpCfg := rw.config().OTLPConfig

 	converter := otlptranslator.NewPrometheusConverter()

 	annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
-		AddMetricSuffixes: otlpCfg.TranslationStrategy != config.NoTranslation,
-		AllowUTF8: otlpCfg.TranslationStrategy != config.UnderscoreEscapingWithSuffixes,
+		AddMetricSuffixes: otlpCfg.TranslationStrategy.ShouldAddSuffixes(),
+		AllowUTF8: !otlpCfg.TranslationStrategy.ShouldEscape(),
 		PromoteResourceAttributes: otlptranslator.NewPromoteResourceAttributes(otlpCfg),
 		KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
 		ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB,
 		AllowDeltaTemporality: rw.allowDeltaTemporality,
+		LookbackDelta: rw.lookbackDelta,
 	})
 	if err != nil {
 		rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err)
@@ -440,7 +440,32 @@ func TestOTLPWriteHandler(t *testing.T) {
 			},
 		},
 	},
+	{
+		name: "UnderscoreEscapingWithoutSuffixes",
+		otlpCfg: config.OTLPConfig{
+			TranslationStrategy: config.UnderscoreEscapingWithoutSuffixes,
+		},
+		expectedSamples: []mockSample{
+			{
+				l: labels.New(labels.Label{Name: "__name__", Value: "test_counter"},
+					labels.Label{Name: "foo_bar", Value: "baz"},
+					labels.Label{Name: "instance", Value: "test-instance"},
+					labels.Label{Name: "job", Value: "test-service"}),
+				t: timestamp.UnixMilli(),
+				v: 10.0,
+			},
+			{
+				l: labels.New(
+					labels.Label{Name: "__name__", Value: "target_info"},
+					labels.Label{Name: "host_name", Value: "test-host"},
+					labels.Label{Name: "instance", Value: "test-instance"},
+					labels.Label{Name: "job", Value: "test-service"},
+				),
+				t: timestamp.UnixMilli(),
+				v: 1,
+			},
+		},
+	},
 	{
 		name: "NoUTF8EscapingWithSuffixes",
 		otlpCfg: config.OTLPConfig{
@@ -263,6 +263,17 @@ func NewTemplateExpander(

 			return floatToTime(v)
 		},
+		"toDuration": func(i interface{}) (*time.Duration, error) {
+			v, err := common_templates.ConvertToFloat(i)
+			if err != nil {
+				return nil, err
+			}
+			d := time.Duration(v * float64(time.Second))
+			return &d, nil
+		},
+		"now": func() float64 {
+			return float64(timestamp) / 1000.0
+		},
 		"pathPrefix": func() string {
 			return externalURL.Path
 		},

@@ -270,7 +281,7 @@ func NewTemplateExpander(
 			return externalURL.String()
 		},
 		"parseDuration": func(d string) (float64, error) {
-			v, err := model.ParseDuration(d)
+			v, err := model.ParseDurationAllowNegative(d)
 			if err != nil {
 				return 0, err
 			}
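The two new template functions are easy to re-create outside Prometheus to see their semantics. This sketch fixes the evaluation timestamp the way the test below does; the inline float/string coercion stands in for common_templates.ConvertToFloat and is an assumption about its behavior:

package main

import (
	"os"
	"strconv"
	"text/template"
	"time"
)

func main() {
	// Fixed evaluation time, matching the test's model.Time(1353755652000).
	evalTs := time.UnixMilli(1353755652000)
	funcs := template.FuncMap{
		// now: the evaluation timestamp as float64 seconds.
		"now": func() float64 { return float64(evalTs.UnixMilli()) / 1000.0 },
		// toDuration: float/int/string seconds -> *time.Duration.
		"toDuration": func(i interface{}) (*time.Duration, error) {
			var v float64
			switch x := i.(type) {
			case float64:
				v = x
			case int:
				v = float64(x)
			case string:
				f, err := strconv.ParseFloat(x, 64)
				if err != nil {
					return nil, err
				}
				v = f
			}
			d := time.Duration(v * float64(time.Second))
			return &d, nil
		},
	}
	t := template.Must(template.New("t").Funcs(funcs).
		Parse(`{{ (1800 | toDuration).String }} since {{ now }}`))
	// Prints: 30m0s since 1.353755652e+09
	_ = t.Execute(os.Stdout, nil)
}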
@@ -21,6 +21,7 @@ import (
 	"testing"
 	"time"

+	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/histogram"

@@ -467,6 +468,31 @@ func TestTemplateExpansion(t *testing.T) {
 		text: `{{ ("1435065584.128" | toTime).Format "2006" }}`,
 		output: "2015",
 	},
+	{
+		// toDuration - input as float64 seconds, returns *time.Duration.
+		text: `{{ (1800 | toDuration).String }}`,
+		output: "30m0s",
+	},
+	{
+		// toDuration - input as string seconds, returns *time.Duration.
+		text: `{{ ("1800" | toDuration).String }}`,
+		output: "30m0s",
+	},
+	{
+		// now - returns fixed timestamp as float64 seconds.
+		text: `{{ now }}`,
+		output: "1.353755652e+09",
+	},
+	{
+		// now - returns fixed timestamp converted to formatted time string.
+		text: `{{ (now | toTime).Format "Mon Jan 2 15:04:05 2006" }}`,
+		output: "Sat Nov 24 11:14:12 2012",
+	},
+	{
+		// returns Unix milliseconds timestamp for 30 minutes ago.
+		text: `{{ ("-30m" | parseDuration | toDuration | (now | toTime).Add).UnixMilli }}`,
+		output: "1353753852000",
+	},
 	{
 		// Title.
 		text: "{{ \"aa bb CC\" | title }}",

@@ -514,10 +540,15 @@ func TestTemplateExpansion(t *testing.T) {
 		output: "http://testhost:9090/path/prefix",
 	},
 	{
-		// parseDuration (using printf to ensure the return is a string).
+		// parseDuration with positive duration (using printf to ensure the return is a string).
 		text: "{{ printf \"%0.2f\" (parseDuration \"1h2m10ms\") }}",
 		output: "3720.01",
 	},
+	{
+		// parseDuration with negative duration (using printf to ensure the return is a string).
+		text: "{{ printf \"%0.2f\" (parseDuration \"-1h2m10ms\") }}",
+		output: "-3720.01",
+	},
 	{
 		// Simple hostname.
 		text: "{{ \"foo.example.com\" | stripDomain }}",

@@ -579,7 +610,7 @@ func testTemplateExpansion(t *testing.T, scenarios []scenario) {
 	}
 	var result string
 	var err error
-	expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, 0, queryFunc, extURL, s.options)
+	expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, model.Time(1353755652000), queryFunc, extURL, s.options)
 	if s.html {
 		result, err = expander.ExpandHTML(nil)
 	} else {
@@ -189,6 +189,8 @@ type BlockMeta struct {
 // BlockStats contains stats about contents of a block.
 type BlockStats struct {
 	NumSamples uint64 `json:"numSamples,omitempty"`
+	NumFloatSamples uint64 `json:"numFloatSamples,omitempty"`
+	NumHistogramSamples uint64 `json:"numHistogramSamples,omitempty"`
 	NumSeries uint64 `json:"numSeries,omitempty"`
 	NumChunks uint64 `json:"numChunks,omitempty"`
 	NumTombstones uint64 `json:"numTombstones,omitempty"`
@@ -182,14 +182,6 @@ func DefaultPostingsDecoderFactory(_ *BlockMeta) index.PostingsDecoder {
 	return index.DecodePostingsRaw
 }

-func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
-	return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{
-		MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize,
-		MergeFunc: mergeFunc,
-		EnableOverlappingCompaction: true,
-	})
-}
-
 func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
 	return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{
 		MergeFunc: mergeFunc,

@@ -895,7 +887,14 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa
 		meta.Stats.NumChunks += uint64(len(chks))
 		meta.Stats.NumSeries++
 		for _, chk := range chks {
-			meta.Stats.NumSamples += uint64(chk.Chunk.NumSamples())
+			samples := uint64(chk.Chunk.NumSamples())
+			meta.Stats.NumSamples += samples
+			switch chk.Chunk.Encoding() {
+			case chunkenc.EncHistogram, chunkenc.EncFloatHistogram:
+				meta.Stats.NumHistogramSamples += samples
+			case chunkenc.EncXOR:
+				meta.Stats.NumFloatSamples += samples
+			}
 		}

 		for _, chk := range chks {
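The per-encoding accounting reduces to classifying each chunk's samples by its encoding. A stand-alone sketch with a local enum in place of chunkenc's constants:

package main

import "fmt"

// Mirrors the diff's bookkeeping in PopulateBlock: samples in XOR chunks
// count as floats, histogram and float-histogram chunks as histograms.
type encoding int

const (
	encXOR encoding = iota
	encHistogram
	encFloatHistogram
)

type chunk struct {
	enc     encoding
	samples uint64
}

type blockStats struct {
	NumSamples, NumFloatSamples, NumHistogramSamples uint64
}

func (s *blockStats) add(chks []chunk) {
	for _, c := range chks {
		s.NumSamples += c.samples
		switch c.enc {
		case encHistogram, encFloatHistogram:
			s.NumHistogramSamples += c.samples
		case encXOR:
			s.NumFloatSamples += c.samples
		}
	}
}

func main() {
	var s blockStats
	s.add([]chunk{{encXOR, 120}, {encHistogram, 30}})
	fmt.Printf("%+v\n", s) // {NumSamples:150 NumFloatSamples:120 NumHistogramSamples:30}
}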
@@ -1098,6 +1098,13 @@ func TestCompaction_populateBlock(t *testing.T) {
 		s.NumChunks += uint64(len(series.chunks))
 		for _, chk := range series.chunks {
 			s.NumSamples += uint64(len(chk))
+			for _, smpl := range chk {
+				if smpl.h != nil || smpl.fh != nil {
+					s.NumHistogramSamples++
+				} else {
+					s.NumFloatSamples++
+				}
+			}
 		}
 	}
 	require.Equal(t, s, meta.Stats)
@@ -9066,7 +9066,7 @@ func TestOOONativeHistogramsSettings(t *testing.T) {
 	})
 }

-// compareSeries essentially replaces `require.Equal(t, expected, actual) in
+// compareSeries essentially replaces `require.Equal(t, expected, actual)` in
 // situations where the actual series might contain more counter reset hints
 // "unknown" than the expected series. This can easily happen for long series
 // that trigger new chunks. This function therefore tolerates counter reset
@@ -799,13 +799,17 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
 		if errors.Is(err, storage.ErrOutOfOrderSample) {
 			return 0, storage.ErrOutOfOrderCT
 		}
+
+		return 0, err
 	}

 	// OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples.
 	// This is to prevent the injected zero from being marked as OOO forever.
 	if isOOO {
 		s.Unlock()
 		return 0, storage.ErrOutOfOrderCT
 	}

 	s.pendingCommit = true
 	s.Unlock()
 	a.histograms = append(a.histograms, record.RefHistogramSample{

@@ -832,13 +836,17 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
 		if errors.Is(err, storage.ErrOutOfOrderSample) {
 			return 0, storage.ErrOutOfOrderCT
 		}
+
+		return 0, err
 	}

 	// OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples.
 	// This is to prevent the injected zero from being marked as OOO forever.
 	if isOOO {
 		s.Unlock()
 		return 0, storage.ErrOutOfOrderCT
 	}

 	s.pendingCommit = true
 	s.Unlock()
 	a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{

@@ -852,6 +860,7 @@ func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset l
 	if ct > a.maxt {
 		a.maxt = ct
 	}
+
 	return storage.SeriesRef(s.ref), nil
 }
@@ -6717,6 +6717,75 @@ func TestHeadAppender_AppendCT(t *testing.T) {
 	}
 }

+func TestHeadAppender_AppendHistogramCTZeroSample(t *testing.T) {
+	type appendableSamples struct {
+		ts int64
+		h *histogram.Histogram
+		fh *histogram.FloatHistogram
+		ct int64 // 0 if no created timestamp.
+	}
+	for _, tc := range []struct {
+		name string
+		appendableSamples []appendableSamples
+		expectedError error
+	}{
+		{
+			name: "integer histogram CT lower than minValidTime initiates ErrOutOfBounds",
+			appendableSamples: []appendableSamples{
+				{ts: 100, h: tsdbutil.GenerateTestHistogram(1), ct: -1},
+			},
+			expectedError: storage.ErrOutOfBounds,
+		},
+		{
+			name: "float histograms CT lower than minValidTime initiates ErrOutOfBounds",
+			appendableSamples: []appendableSamples{
+				{ts: 100, fh: tsdbutil.GenerateTestFloatHistogram(1), ct: -1},
+			},
+			expectedError: storage.ErrOutOfBounds,
+		},
+		{
+			name: "integer histogram CT duplicates an existing sample",
+			appendableSamples: []appendableSamples{
+				{ts: 100, h: tsdbutil.GenerateTestHistogram(1)},
+				{ts: 200, h: tsdbutil.GenerateTestHistogram(1), ct: 100},
+			},
+			expectedError: storage.ErrDuplicateSampleForTimestamp,
+		},
+		{
+			name: "float histogram CT duplicates an existing sample",
+			appendableSamples: []appendableSamples{
+				{ts: 100, fh: tsdbutil.GenerateTestFloatHistogram(1)},
+				{ts: 200, fh: tsdbutil.GenerateTestFloatHistogram(1), ct: 100},
+			},
+			expectedError: storage.ErrDuplicateSampleForTimestamp,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			h, _ := newTestHead(t, DefaultBlockDuration, compression.None, false)
+
+			defer func() {
+				require.NoError(t, h.Close())
+			}()
+
+			lbls := labels.FromStrings("foo", "bar")
+
+			var ref storage.SeriesRef
+			for _, sample := range tc.appendableSamples {
+				a := h.Appender(context.Background())
+				var err error
+				if sample.ct != 0 {
+					ref, err = a.AppendHistogramCTZeroSample(ref, lbls, sample.ts, sample.ct, sample.h, sample.fh)
+					require.ErrorIs(t, err, tc.expectedError)
+				}
+
+				ref, err = a.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh)
+				require.NoError(t, err)
+				require.NoError(t, a.Commit())
+			}
+		})
+	}
+}
+
 func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
 	// Use a chunk range of 1 here so that if we attempted to determine if the head
 	// was compactable using default values for min and max times, `Head.compactable()`
@@ -20,6 +20,7 @@ import (
 	"math"
 	"math/rand"
 	"path/filepath"
+	"slices"
 	"sort"
 	"strconv"
 	"sync"

@@ -2274,14 +2275,18 @@ func (m mockIndex) LabelValues(_ context.Context, name string, hints *storage.La
 	}

 	for _, series := range m.series {
+		matches := true
 		for _, matcher := range matchers {
-			if matcher.Matches(series.l.Get(matcher.Name)) {
-				// TODO(colega): shouldn't we check all the matchers before adding this to the values?
-				values = append(values, series.l.Get(name))
-				if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit {
+			matches = matches && matcher.Matches(series.l.Get(matcher.Name))
+			if !matches {
 				break
 			}
 		}
+		if matches && !slices.Contains(values, series.l.Get(name)) {
+			values = append(values, series.l.Get(name))
+		}
+		if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit {
+			break
+		}
 	}
 	}

@@ -2392,7 +2397,7 @@ func (m mockIndex) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([
 	for _, series := range m.series {
 		matches := true
 		for _, matcher := range matchers {
-			matches = matches || matcher.Matches(series.l.Get(matcher.Name))
+			matches = matches && matcher.Matches(series.l.Get(matcher.Name))
 			if !matches {
 				break
 			}
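The mockIndex fix turns the matcher check from premature short-circuiting into a proper logical AND with de-duplication. The core of it, sketched with a simplified matcher type in place of labels.Matcher:

package main

import (
	"fmt"
	"slices"
)

// matcher is a simplified stand-in for labels.Matcher.
type matcher func(value string) bool

// labelValues adds a series' value for name only when *all* matchers match,
// and skips duplicates — the behavior the fixed mockIndex.LabelValues has.
func labelValues(series []map[string]string, name string, matchers map[string]matcher) []string {
	var values []string
	for _, s := range series {
		matches := true
		for mName, m := range matchers {
			matches = matches && m(s[mName])
			if !matches {
				break
			}
		}
		if matches && !slices.Contains(values, s[name]) {
			values = append(values, s[name])
		}
	}
	return values
}

func main() {
	series := []map[string]string{
		{"job": "api", "env": "prod"},
		{"job": "api", "env": "dev"},
		{"job": "db", "env": "prod"},
	}
	eq := func(want string) matcher { return func(v string) bool { return v == want } }
	fmt.Println(labelValues(series, "job", map[string]matcher{"env": eq("prod")})) // [api db]
}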
@@ -264,6 +264,7 @@ func NewAPI(
 	acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
 	otlpEnabled, otlpDeltaToCumulative, otlpNativeDeltaIngestion bool,
 	ctZeroIngestionEnabled bool,
+	lookbackDelta time.Duration,
 ) *API {
 	a := &API{
 		QueryEngine: qe,

@@ -310,7 +311,11 @@ func NewAPI(
 		a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled)
 	}
 	if otlpEnabled {
-		a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{ConvertDelta: otlpDeltaToCumulative, NativeDelta: otlpNativeDeltaIngestion})
+		a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, registerer, ap, configFunc, remote.OTLPOptions{
+			ConvertDelta: otlpDeltaToCumulative,
+			NativeDelta: otlpNativeDeltaIngestion,
+			LookbackDelta: lookbackDelta,
+		})
 	}

 	return a
|
||||||
false,
|
false,
|
||||||
false,
|
false,
|
||||||
false,
|
false,
|
||||||
|
5*time.Minute,
|
||||||
)
|
)
|
||||||
|
|
||||||
promRouter := route.New().WithPrefix("/api/v1")
|
promRouter := route.New().WithPrefix("/api/v1")
|
||||||
|
|
|
@@ -156,7 +156,7 @@ func marshalSampleJSON(s promql.Sample, stream *jsoniter.Stream) {
 	stream.WriteObjectEnd()
 }

-// marshalFPointJSON writes `[ts, "1.234"]`.
+// unsafeMarshalFPointJSON writes `[ts, "1.234"]`.
 func unsafeMarshalFPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	p := *((*promql.FPoint)(ptr))
 	marshalFPointJSON(p, stream)

@@ -170,7 +170,7 @@ func marshalFPointJSON(p promql.FPoint, stream *jsoniter.Stream) {
 	stream.WriteArrayEnd()
 }

-// marshalHPointJSON writes `[ts, { < histogram, see jsonutil.MarshalHistogram > } ]`.
+// unsafeMarshalHPointJSON writes `[ts, { < histogram, see jsonutil.MarshalHistogram > } ]`.
 func unsafeMarshalHPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
 	p := *((*promql.HPoint)(ptr))
 	marshalHPointJSON(p, stream)
@@ -3,7 +3,6 @@
 /release/**/test/
 /scripts/
 /.circleci/
-/src/
 /test/
 /examples/
 /gulpfile.js

@@ -3,4 +3,3 @@ generate-types.sh
 jest.config.cjs
 rollup.config.js
 /test/
-/src/
@@ -392,6 +392,7 @@ func New(logger *slog.Logger, o *Options) *Handler {
 		o.ConvertOTLPDelta,
 		o.NativeOTLPDeltaIngestion,
 		o.CTZeroIngestionEnabled,
+		o.LookbackDelta,
 	)

 	if o.RoutePrefix != "/" {