From bb769669923a830ae11f284905979017c2ac7a12 Mon Sep 17 00:00:00 2001 From: Lukasz Mierzwa Date: Tue, 15 Apr 2025 17:52:24 +0100 Subject: [PATCH 01/39] Use stringlabels by default This removes the stringlabels build tag, makes that implementation the default one, and moves the old labels implementation under the slicelabels build tag. Fixes #16064. Signed-off-by: Lukasz Mierzwa --- .github/workflows/ci.yml | 4 ++-- model/labels/labels.go | 4 ++-- model/labels/labels_stringlabels.go | 2 +- model/labels/sharding.go | 2 +- model/labels/sharding_stringlabels.go | 2 +- tsdb/agent/series_test.go | 10 +++++----- tsdb/head_test.go | 10 +++++----- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f2263689a3..f280e9ad78 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,8 +18,8 @@ jobs: - uses: ./.github/promci/actions/setup_environment with: enable_npm: true - - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false + - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1 + - run: go test ./tsdb/ -test.tsdb-isolation=false - run: make -C documentation/examples/remote_storage - run: make -C documentation/examples diff --git a/model/labels/labels.go b/model/labels/labels.go index ed66d73cbf..5ebdf6a3fe 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !stringlabels && !dedupelabels +//go:build slicelabels package labels @@ -453,7 +453,7 @@ func (b *ScratchBuilder) Add(name, value string) { } // UnsafeAddBytes adds a name/value pair, using []byte instead of string. -// The '-tags stringlabels' version of this function is unsafe, hence the name. +// The default version of this function is unsafe, hence the name. // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { b.add = append(b.add, Label{Name: string(name), Value: string(value)}) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index f49ed96f65..8d611492b5 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build stringlabels +//go:build !slicelabels && !dedupelabels package labels diff --git a/model/labels/sharding.go b/model/labels/sharding.go index 8b3a369397..ed05da675f 100644 --- a/model/labels/sharding.go +++ b/model/labels/sharding.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !stringlabels && !dedupelabels +//go:build slicelabels package labels diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go index 798f268eb9..4dcbaa21d1 100644 --- a/model/labels/sharding_stringlabels.go +++ b/model/labels/sharding_stringlabels.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build stringlabels +//go:build !slicelabels && !dedupelabels package labels diff --git a/tsdb/agent/series_test.go b/tsdb/agent/series_test.go index bc5a4af5d3..257f5815f9 100644 --- a/tsdb/agent/series_test.go +++ b/tsdb/agent/series_test.go @@ -77,13 +77,13 @@ func TestNoDeadlock(t *testing.T) { func labelsWithHashCollision() (labels.Labels, labels.Labels) { // These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions - ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y") - ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF") + ls1 := labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl") + ls2 := labels.FromStrings("__name__", "metric", "lbl", "RqcXatm") if ls1.Hash() != ls2.Hash() { - // These ones are the same when using -tags stringlabels - ls1 = labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl") - ls2 = labels.FromStrings("__name__", "metric", "lbl", "RqcXatm") + // These ones are the same when using -tags slicelabels + ls1 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y") + ls2 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF") } if ls1.Hash() != ls2.Hash() { diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 8cd44db841..100d5b1265 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -6286,13 +6286,13 @@ func TestHeadCompactionWhileAppendAndCommitExemplar(t *testing.T) { func labelsWithHashCollision() (labels.Labels, labels.Labels) { // These two series have the same XXHash; thanks to https://github.com/pstibrany/labels_hash_collisions - ls1 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y") - ls2 := labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF") + ls1 := labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl") + ls2 := labels.FromStrings("__name__", "metric", "lbl", "RqcXatm") if ls1.Hash() != ls2.Hash() { - // These ones are the same when using -tags stringlabels - ls1 = labels.FromStrings("__name__", "metric", "lbl", "HFnEaGl") - ls2 = labels.FromStrings("__name__", "metric", "lbl", "RqcXatm") + // These ones are the same when using -tags slicelabels + ls1 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "l6CQ5y") + ls2 = labels.FromStrings("__name__", "metric", "lbl1", "value", "lbl2", "v7uDlF") } if ls1.Hash() != ls2.Hash() { From 05088aaa1285be8c21ef652d704f431d9eae1734 Mon Sep 17 00:00:00 2001 From: Lukasz Mierzwa Date: Tue, 15 Apr 2025 18:02:34 +0100 Subject: [PATCH 02/39] Fix linter errors Mostly comment issues and unused variables. Signed-off-by: Lukasz Mierzwa --- model/labels/labels_stringlabels.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index 8d611492b5..fa0bd7bc27 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -76,7 +76,7 @@ func (ls Labels) IsZero() bool { // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. -// TODO: This is only used in printing an error message +// TODO: This is only used in printing an error message. 
func (ls Labels) MatchLabels(on bool, names ...string) Labels { b := NewBuilder(ls) if on { @@ -298,6 +298,7 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } + func yoloBytes(s string) []byte { return unsafe.Slice(unsafe.StringData(s), len(s)) } @@ -370,7 +371,7 @@ func Compare(a, b Labels) int { return +1 } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom will copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { ls.data = b.data // strings are immutable } @@ -440,11 +441,11 @@ func (ls Labels) DropMetricName() Labels { } // InternStrings is a no-op because it would only save when the whole set of labels is identical. -func (ls *Labels) InternStrings(intern func(string) string) { +func (ls *Labels) InternStrings(_ func(string) string) { } // ReleaseStrings is a no-op for the same reason as InternStrings. -func (ls Labels) ReleaseStrings(release func(string)) { +func (ls Labels) ReleaseStrings(_ func(string)) { } // Builder allows modifying Labels. @@ -561,7 +562,7 @@ func encodeVarint(data []byte, offset int, v uint64) int { return base } -// Special code for the common case that a size is less than 128 +// Special code for the common case that a size is less than 128. func encodeSize(data []byte, offset, v int) int { if v < 1<<7 { offset-- @@ -630,7 +631,7 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } -// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// UnsafeAddBytes adds a name/value pair using []byte instead of string to reduce memory allocations. // The values must remain live until Labels() is called. func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) @@ -658,7 +659,7 @@ func (b *ScratchBuilder) Labels() Labels { return b.output } -// Write the newly-built Labels out to ls, reusing an internal buffer. +// Overwrite will write the newly-built Labels out to ls, reusing an internal buffer. // Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { size := labelsSize(b.add) @@ -671,7 +672,7 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) { ls.data = yoloString(b.overwriteBuffer) } -// Symbol-table is no-op, just for api parity with dedupelabels. +// SymbolTable is no-op, just for api parity with dedupelabels. 
type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } From bec3a125a07010087aed1ecdbdca65bae89848d4 Mon Sep 17 00:00:00 2001 From: Lukasz Mierzwa Date: Tue, 15 Apr 2025 18:21:57 +0100 Subject: [PATCH 03/39] Remove stringlabels from promu build tags Signed-off-by: Lukasz Mierzwa --- .promu.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.promu.yml b/.promu.yml index 23379cc1ef..d16bceeed9 100644 --- a/.promu.yml +++ b/.promu.yml @@ -14,10 +14,8 @@ build: all: - netgo - builtinassets - - stringlabels windows: - builtinassets - - stringlabels ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} From 0658923bf060c9b232452f4fcd3dd92a33b7a44b Mon Sep 17 00:00:00 2001 From: Zhang Zhanpeng Date: Thu, 24 Apr 2025 14:34:56 +0800 Subject: [PATCH 04/39] fix conversion from time range in seconds to time duration at parser Signed-off-by: Zhang Zhanpeng --- promql/parser/generated_parser.y | 10 ++--- promql/parser/generated_parser.y.go | 10 ++--- promql/parser/parse_test.go | 51 +++++++++++++++++++++++ promql/promqltest/testdata/functions.test | 41 ++++++++++++++++++ 4 files changed, 102 insertions(+), 10 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index de9234589c..d8fb311154 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -452,7 +452,7 @@ positive_duration_expr : duration_expr offset_expr: expr OFFSET duration_expr { if numLit, ok := $3.(*NumberLiteral); ok { - yylex.(*parser).addOffset($1, time.Duration(numLit.Val*1000)*time.Millisecond) + yylex.(*parser).addOffset($1, time.Duration(math.Round(numLit.Val*float64(time.Second)))) $$ = $1 break } @@ -506,7 +506,7 @@ matrix_selector : expr LEFT_BRACKET positive_duration_expr RIGHT_BRACKET var rangeNl time.Duration if numLit, ok := $3.(*NumberLiteral); ok { - rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond + rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second))) } rangeExpr, _ := $3.(*DurationExpr) $$ = &MatrixSelector{ @@ -523,11 +523,11 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati var rangeNl time.Duration var stepNl time.Duration if numLit, ok := $3.(*NumberLiteral); ok { - rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond + rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second))) } rangeExpr, _ := $3.(*DurationExpr) if numLit, ok := $5.(*NumberLiteral); ok { - stepNl = time.Duration(numLit.Val*1000)*time.Millisecond + stepNl = time.Duration(math.Round(numLit.Val*float64(time.Second))) } stepExpr, _ := $5.(*DurationExpr) $$ = &SubqueryExpr{ @@ -543,7 +543,7 @@ subquery_expr : expr LEFT_BRACKET positive_duration_expr COLON positive_durati { var rangeNl time.Duration if numLit, ok := $3.(*NumberLiteral); ok { - rangeNl = time.Duration(numLit.Val*1000)*time.Millisecond + rangeNl = time.Duration(math.Round(numLit.Val*float64(time.Second))) } rangeExpr, _ := $3.(*DurationExpr) $$ = &SubqueryExpr{ diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 8c84b42f14..7e7ec2dbd3 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -1372,7 +1372,7 @@ yydefault: yyDollar = yyS[yypt-3 : yypt+1] { if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { - yylex.(*parser).addOffset(yyDollar[1].node, time.Duration(numLit.Val*1000)*time.Millisecond) + yylex.(*parser).addOffset(yyDollar[1].node, 
time.Duration(math.Round(numLit.Val*float64(time.Second)))) yyVAL.node = yyDollar[1].node break } @@ -1423,7 +1423,7 @@ yydefault: var rangeNl time.Duration if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { - rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond + rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second))) } rangeExpr, _ := yyDollar[3].node.(*DurationExpr) yyVAL.node = &MatrixSelector{ @@ -1439,11 +1439,11 @@ yydefault: var rangeNl time.Duration var stepNl time.Duration if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { - rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond + rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second))) } rangeExpr, _ := yyDollar[3].node.(*DurationExpr) if numLit, ok := yyDollar[5].node.(*NumberLiteral); ok { - stepNl = time.Duration(numLit.Val*1000) * time.Millisecond + stepNl = time.Duration(math.Round(numLit.Val * float64(time.Second))) } stepExpr, _ := yyDollar[5].node.(*DurationExpr) yyVAL.node = &SubqueryExpr{ @@ -1460,7 +1460,7 @@ yydefault: { var rangeNl time.Duration if numLit, ok := yyDollar[3].node.(*NumberLiteral); ok { - rangeNl = time.Duration(numLit.Val*1000) * time.Millisecond + rangeNl = time.Duration(math.Round(numLit.Val * float64(time.Second))) } rangeExpr, _ := yyDollar[3].node.(*DurationExpr) yyVAL.node = &SubqueryExpr{ diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index a09ccea9d6..2764dc52df 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2008,6 +2008,57 @@ var testExpr = []struct { errMsg: `unexpected "}" in label matching, expected string`, }, // Test matrix selector. + { + input: "test[1000ms]", + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "test", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 4, + }, + }, + Range: 1000 * time.Millisecond, + EndPos: 12, + }, + }, + { + input: "test[1001ms]", + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "test", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 4, + }, + }, + Range: 1001 * time.Millisecond, + EndPos: 12, + }, + }, + { + input: "test[1002ms]", + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "test", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 4, + }, + }, + Range: 1002 * time.Millisecond, + EndPos: 12, + }, + }, { input: "test[5s]", expected: &MatrixSelector{ diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index fafe2dda40..d37aeb53f6 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -1019,6 +1019,47 @@ eval instant at 1m sum_over_time(metric[2m]) eval instant at 1m avg_over_time(metric[2m]) {} 0.5 +# Test per-series aggregation on dense samples. 
+clear +load 1ms + metric 1+0x4000 + +eval instant at 4s sum_over_time(metric[1000ms]) + {} 1000 + +eval instant at 4s sum_over_time(metric[1001ms]) + {} 1001 + +eval instant at 4s sum_over_time(metric[1002ms]) + {} 1002 + +eval instant at 4s sum_over_time(metric[1003ms]) + {} 1003 + +eval instant at 4s sum_over_time(metric[2000ms]) + {} 2000 + +eval instant at 4s sum_over_time(metric[2001ms]) + {} 2001 + +eval instant at 4s sum_over_time(metric[2002ms]) + {} 2002 + +eval instant at 4s sum_over_time(metric[2003ms]) + {} 2003 + +eval instant at 4s sum_over_time(metric[3000ms]) + {} 3000 + +eval instant at 4s sum_over_time(metric[3001ms]) + {} 3001 + +eval instant at 4s sum_over_time(metric[3002ms]) + {} 3002 + +eval instant at 4s sum_over_time(metric[3003ms]) + {} 3003 + # Tests for stddev_over_time and stdvar_over_time. clear load 10s From 3877f5829e94b94b4348288e7b76971d5ac37b26 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Tue, 29 Apr 2025 16:20:32 +0100 Subject: [PATCH 05/39] Volunteer Bryan Boreham as Shepherd for Release 3.5 Signed-off-by: Bryan Boreham --- RELEASE.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 28abb5be0f..34890c0d29 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -14,7 +14,8 @@ Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/ | v3.2 | 2025-01-28 | Jan Fajerski (GitHub: @jan--f) | | v3.3 | 2025-03-11 | Ayoub Mrini (Github: @machine424) | | v3.4 | 2025-04-22 | Jan-Otto Kröpke (Github: @jkroepke)| -| v3.5 | 2025-06-03 | **volunteer welcome** | +| v3.5 LTS | 2025-06-03 | Bryan Boreham (GitHub: @bboreham) | +| v3.6 | 2025-07-15 | **volunteer welcome** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. From 08982b177f37c29f7cf1cb654fc6aa8a11b58064 Mon Sep 17 00:00:00 2001 From: Jonas Lammler Date: Sun, 27 Apr 2025 13:09:38 +0200 Subject: [PATCH 06/39] Add `label_selector` to hetzner service discovery Allows to filter the servers when sending the listing request to the API. This feature is only available when using the `role=hcloud`. See https://docs.hetzner.cloud/#label-selector for details on how to use the label selector. Signed-off-by: Jonas Lammler --- discovery/hetzner/hcloud.go | 13 +++++++++---- discovery/hetzner/hetzner.go | 7 +++++-- docs/configuration/configuration.md | 4 ++++ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go index ba64250c0f..88fe09bd3e 100644 --- a/discovery/hetzner/hcloud.go +++ b/discovery/hetzner/hcloud.go @@ -53,14 +53,16 @@ const ( // the Discoverer interface. type hcloudDiscovery struct { *refresh.Discovery - client *hcloud.Client - port int + client *hcloud.Client + port int + labelSelector string } // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. 
func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) { d := &hcloudDiscovery{ - port: conf.Port, + port: conf.Port, + labelSelector: conf.LabelSelector, } rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd") @@ -79,7 +81,10 @@ func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error } func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { - servers, err := d.client.Server.All(ctx) + servers, err := d.client.Server.AllWithOpts(ctx, hcloud.ServerListOpts{ListOpts: hcloud.ListOpts{ + PerPage: 50, + LabelSelector: d.labelSelector, + }}) if err != nil { return nil, err } diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 97d48f6d70..9245d933cc 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -59,8 +59,11 @@ type SDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval"` Port int `yaml:"port"` Role Role `yaml:"role"` - hcloudEndpoint string // For tests only. - robotEndpoint string // For tests only. + + LabelSelector string `yaml:"label_selector,omitempty"` + + hcloudEndpoint string // For tests only. + robotEndpoint string // For tests only. } // NewDiscovererMetrics implements discovery.Config. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index db7b307a1e..4cd4a677ab 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -1638,6 +1638,10 @@ role: # The time after which the servers are refreshed. [ refresh_interval: | default = 60s ] +# Label selector used to filter the servers when fetching them from the API. See https://docs.hetzner.cloud/#label-selector for more details. +# Only used when role is hcloud. +[ label_selector: ] + # HTTP client settings, including authentication methods (such as basic auth and # authorization), proxy configurations, TLS options, custom HTTP headers, etc. [ ] From 4eca0942f2fbdc23e575652aa6e4e1cb53efec1e Mon Sep 17 00:00:00 2001 From: Nicolas Peugnet Date: Wed, 30 Apr 2025 11:23:00 +0200 Subject: [PATCH 07/39] Clarify the docs of promtool --{min,max}-time options State explicitely what kind of timestamps are expected for the --min-time and --max-time options of promtool tsdb commands. This is especially important for the dump-openmetrics command as users could otherwise mistakenly think it would be in seconds, like the OpenMetrics timestamps themselves. Signed-off-by: Nicolas Peugnet --- cmd/promtool/main.go | 8 ++++---- docs/command-line/promtool.md | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 0304fa2d57..c6a5801d28 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -257,15 +257,15 @@ func main() { tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.") dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). 
The sandbox is cleaned up at the end.").String() - dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() - dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() + dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() + dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.") dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String() - dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() - dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() + dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() + dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump, in milliseconds since the Unix epoch.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings() importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.") diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 1c4c0a18f9..3b1ca84f6b 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -581,8 +581,8 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | | --sandbox-dir-root | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | -| --min-time | Minimum timestamp to dump. | `-9223372036854775808` | -| --max-time | Maximum timestamp to dump. | `9223372036854775807` | +| --min-time | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` | +| --max-time | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` | | --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -608,8 +608,8 @@ Dump samples from a TSDB. 
| Flag | Description | Default | | --- | --- | --- | | --sandbox-dir-root | Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end. | | -| --min-time | Minimum timestamp to dump. | `-9223372036854775808` | -| --max-time | Maximum timestamp to dump. | `9223372036854775807` | +| --min-time | Minimum timestamp to dump, in milliseconds since the Unix epoch. | `-9223372036854775808` | +| --max-time | Maximum timestamp to dump, in milliseconds since the Unix epoch. | `9223372036854775807` | | --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | From b2c2146d7c3d2f32b273c466dfa792d2b3bf56b2 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 30 Apr 2025 10:53:48 +0100 Subject: [PATCH 08/39] Labels: simpler/faster stringlabels encoding (#16069) Instead of using varint to encode the size of each label, use a single byte for size 0-254, or a flag value of 255 followed by the size in 3 bytes little-endian. This reduces the amount of code, and also the number of branches in commonly-executed code, so it runs faster. The maximum allowed label name or value length is now 2^24 or 16MB. Memory used by labels changes as follows: * Labels from 0 to 127 bytes length: same * From 128 to 254: 1 byte less * From 255 to 16383: 2 bytes more * From 16384 to 2MB: 1 byte more * From 2MB to 16MB: same Labels: panic on string too long. Slightly more user-friendly than encoding bad data and finding out when we decode. Clarify that Labels.Bytes() encoding can change --------- Signed-off-by: Bryan Boreham --- model/labels/labels.go | 4 +- model/labels/labels_dedupelabels.go | 4 +- model/labels/labels_stringlabels.go | 75 +++++++++-------------------- model/labels/labels_test.go | 10 ++++ 4 files changed, 38 insertions(+), 55 deletions(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index ed66d73cbf..d148490edb 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -32,8 +32,8 @@ func (ls Labels) Len() int { return len(ls) } func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } -// Bytes returns ls as a byte slice. -// It uses an byte invalid character as a separator and so should not be used for printing. +// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// Encoding may change over time or between runs of Prometheus. func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) b.WriteByte(labelSep) diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index a0d83e0044..38cf91301c 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -140,8 +140,8 @@ func decodeString(t *nameTable, data string, index int) (string, int) { return t.ToName(num), index } -// Bytes returns ls as a byte slice. -// It uses non-printing characters and so should not be used for printing. +// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// Encoding may change over time or between runs of Prometheus. 
func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) for i := 0; i < len(ls.data); { diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index f49ed96f65..dfb374f8dd 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -24,31 +24,25 @@ import ( ) // Labels is implemented by a single flat string holding name/value pairs. -// Each name and value is preceded by its length in varint encoding. +// Each name and value is preceded by its length, encoded as a single byte +// for size 0-254, or the following 3 bytes little-endian, if the first byte is 255. +// Maximum length allowed is 2^24 or 16MB. // Names are in order. type Labels struct { data string } func decodeSize(data string, index int) (int, int) { - // Fast-path for common case of a single byte, value 0..127. b := data[index] index++ - if b < 0x80 { - return int(b), index - } - size := int(b & 0x7F) - for shift := uint(7); ; shift += 7 { + if b == 255 { + // Larger numbers are encoded as 3 bytes little-endian. // Just panic if we go of the end of data, since all Labels strings are constructed internally and // malformed data indicates a bug, or memory corruption. - b := data[index] - index++ - size |= int(b&0x7F) << shift - if b < 0x80 { - break - } + return int(data[index]) + (int(data[index+1]) << 8) + (int(data[index+2]) << 16), index + 3 } - return size, index + // More common case of a single byte, value 0..254. + return int(b), index } func decodeString(data string, index int) (string, int) { @@ -57,8 +51,8 @@ func decodeString(data string, index int) (string, int) { return data[index : index+size], index + size } -// Bytes returns ls as a byte slice. -// It uses non-printing characters and so should not be used for printing. +// Bytes returns an opaque, not-human-readable, encoding of ls, usable as a map key. +// Encoding may change over time or between runs of Prometheus. func (ls Labels) Bytes(buf []byte) []byte { if cap(buf) < len(ls.data) { buf = make([]byte, len(ls.data)) @@ -527,48 +521,27 @@ func marshalLabelToSizedBuffer(m *Label, data []byte) int { return len(data) - i } -func sizeVarint(x uint64) (n int) { - // Most common case first - if x < 1<<7 { +func sizeWhenEncoded(x uint64) (n int) { + if x < 255 { return 1 + } else if x <= 1<<24 { + return 4 } - if x >= 1<<56 { - return 9 - } - if x >= 1<<28 { - x >>= 28 - n = 4 - } - if x >= 1<<14 { - x >>= 14 - n += 2 - } - if x >= 1<<7 { - n++ - } - return n + 1 + panic("String too long to encode as label.") } -func encodeVarint(data []byte, offset int, v uint64) int { - offset -= sizeVarint(v) - base := offset - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return base -} - -// Special code for the common case that a size is less than 128 func encodeSize(data []byte, offset, v int) int { - if v < 1<<7 { + if v < 255 { offset-- data[offset] = uint8(v) return offset } - return encodeVarint(data, offset, uint64(v)) + offset -= 4 + data[offset] = 255 + data[offset+1] = byte(v) + data[offset+2] = byte((v >> 8)) + data[offset+3] = byte((v >> 16)) + return offset } func labelsSize(lbls []Label) (n int) { @@ -582,9 +555,9 @@ func labelsSize(lbls []Label) (n int) { func labelSize(m *Label) (n int) { // strings are encoded as length followed by contents. 
l := len(m.Name) - n += l + sizeVarint(uint64(l)) + n += l + sizeWhenEncoded(uint64(l)) l = len(m.Value) - n += l + sizeVarint(uint64(l)) + n += l + sizeWhenEncoded(uint64(l)) return n } diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index a2a7734326..b7ba71b553 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -27,6 +27,8 @@ import ( ) func TestLabels_String(t *testing.T) { + s254 := strings.Repeat("x", 254) // Edge cases for stringlabels encoding. + s255 := strings.Repeat("x", 255) cases := []struct { labels Labels expected string @@ -43,6 +45,14 @@ func TestLabels_String(t *testing.T) { labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"), expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`, }, + { + labels: FromStrings("aaa", "111", "xx", s254), + expected: `{aaa="111", xx="` + s254 + `"}`, + }, + { + labels: FromStrings("aaa", "111", "xx", s255), + expected: `{aaa="111", xx="` + s255 + `"}`, + }, } for _, c := range cases { str := c.labels.String() From 477b55b8600407b8b64645f6d90a2d4515f99773 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Thu, 1 May 2025 08:35:25 +0100 Subject: [PATCH 09/39] [BUGFIX] Top-level: Update GOGC before loading TSDB (#16491) We should use the configured value, or Prometheus' default of 75%, while initializing and loading the WAL. Since the Go default is 100%, most Prometheus users would experience higher memory usage before the value is configured. Also: move Go runtime params earlier in initialization. E.g. if a module starting up looks at GOMAXPROCS to size something, we need to have set it already. --------- Signed-off-by: Bryan Boreham --- cmd/prometheus/main.go | 60 +++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 2623720bd1..4c3ede362e 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -651,6 +651,31 @@ func main() { cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow } + // Set Go runtime parameters before we get too far into initialization. + updateGoGC(cfgFile, logger) + if cfg.maxprocsEnable { + l := func(format string, a ...interface{}) { + logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") + } + if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { + logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err) + } + } + + if cfg.memlimitEnable { + if _, err := memlimit.SetGoMemLimitWithOpts( + memlimit.WithRatio(cfg.memlimitRatio), + memlimit.WithProvider( + memlimit.ApplyFallback( + memlimit.FromCgroup, + memlimit.FromSystem, + ), + ), + ); err != nil { + logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) + } + } + // Now that the validity of the config is established, set the config // success metrics accordingly, although the config isn't really loaded // yet. 
This will happen later (including setting these metrics again), @@ -801,29 +826,6 @@ func main() { ruleManager *rules.Manager ) - if cfg.maxprocsEnable { - l := func(format string, a ...interface{}) { - logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") - } - if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { - logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err) - } - } - - if cfg.memlimitEnable { - if _, err := memlimit.SetGoMemLimitWithOpts( - memlimit.WithRatio(cfg.memlimitRatio), - memlimit.WithProvider( - memlimit.ApplyFallback( - memlimit.FromCgroup, - memlimit.FromSystem, - ), - ), - ); err != nil { - logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) - } - } - if !agentMode { opts := promql.EngineOpts{ Logger: logger.With("component", "query engine"), @@ -1509,6 +1511,14 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) } + updateGoGC(conf, logger) + + noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) + timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)) + return nil +} + +func updateGoGC(conf *config.Config, logger *slog.Logger) { oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC) if oldGoGC != conf.Runtime.GoGC { logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) @@ -1519,10 +1529,6 @@ func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logg } else { os.Setenv("GOGC", "off") } - - noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) - timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)) - return nil } func startsOrEndsWithQuote(s string) bool { From b6aaea22fbbab8e68271a6c1fe01fe3ae57dea18 Mon Sep 17 00:00:00 2001 From: Graham Reed Date: Thu, 1 May 2025 08:58:12 +0100 Subject: [PATCH 10/39] promtool: Optional fuzzy float64 comparison in rules unittests (#16395) Make fuzzy compare opt-in via fuzzy_compare boolean in each unittest file. Signed-off-by: Graham Reed --- cmd/promtool/testdata/rules_run_fuzzy.yml | 43 ++++++++++++++++++++ cmd/promtool/testdata/rules_run_no_fuzzy.yml | 24 +++++++++++ cmd/promtool/unittest.go | 18 ++++++-- cmd/promtool/unittest_test.go | 23 +++++++++++ docs/configuration/unit_testing_rules.md | 16 +++++--- 5 files changed, 114 insertions(+), 10 deletions(-) create mode 100644 cmd/promtool/testdata/rules_run_fuzzy.yml create mode 100644 cmd/promtool/testdata/rules_run_no_fuzzy.yml diff --git a/cmd/promtool/testdata/rules_run_fuzzy.yml b/cmd/promtool/testdata/rules_run_fuzzy.yml new file mode 100644 index 0000000000..3bf4e47a45 --- /dev/null +++ b/cmd/promtool/testdata/rules_run_fuzzy.yml @@ -0,0 +1,43 @@ +# Minimal test case to see that fuzzy compare is working as expected. +# It should allow slight floating point differences through. Larger +# floating point differences should still fail. 
+ +evaluation_interval: 1m +fuzzy_compare: true + +tests: + - name: correct fuzzy match + input_series: + - series: test_low + values: 2.9999999999999996 + - series: test_high + values: 3.0000000000000004 + promql_expr_test: + - expr: test_low + eval_time: 0 + exp_samples: + - labels: test_low + value: 3 + - expr: test_high + eval_time: 0 + exp_samples: + - labels: test_high + value: 3 + + - name: wrong fuzzy match + input_series: + - series: test_low + values: 2.9999999999999987 + - series: test_high + values: 3.0000000000000013 + promql_expr_test: + - expr: test_low + eval_time: 0 + exp_samples: + - labels: test_low + value: 3 + - expr: test_high + eval_time: 0 + exp_samples: + - labels: test_high + value: 3 diff --git a/cmd/promtool/testdata/rules_run_no_fuzzy.yml b/cmd/promtool/testdata/rules_run_no_fuzzy.yml new file mode 100644 index 0000000000..eba201a28c --- /dev/null +++ b/cmd/promtool/testdata/rules_run_no_fuzzy.yml @@ -0,0 +1,24 @@ +# Minimal test case to see that fuzzy compare can be turned off, +# and slight floating point differences fail matching. + +evaluation_interval: 1m +fuzzy_compare: false + +tests: + - name: correct fuzzy match + input_series: + - series: test_low + values: 2.9999999999999996 + - series: test_high + values: 3.0000000000000004 + promql_expr_test: + - expr: test_low + eval_time: 0 + exp_samples: + - labels: test_low + value: 3 + - expr: test_high + eval_time: 0 + exp_samples: + - labels: test_high + value: 3 diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 7a97a466a6..9bc1af1f61 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "math" "os" "path/filepath" "sort" @@ -130,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } - ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.RuleFiles...) + ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.FuzzyCompare, unitTestInp.RuleFiles...) if ers != nil { for _, e := range ers { tc.Fail(e.Error()) @@ -159,6 +160,7 @@ type unitTestFile struct { EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` GroupEvalOrder []string `yaml:"group_eval_order"` Tests []testGroup `yaml:"tests"` + FuzzyCompare bool `yaml:"fuzzy_compare,omitempty"` } // resolveAndGlobFilepaths joins all relative paths in a configuration @@ -197,7 +199,7 @@ type testGroup struct { } // test performs the unit tests. -func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields bool, ruleFiles ...string) (outErr []error) { +func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields, fuzzyCompare bool, ruleFiles ...string) (outErr []error) { if debug { testStart := time.Now() fmt.Printf("DEBUG: Starting test %s\n", testname) @@ -237,6 +239,14 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde mint := time.Unix(0, 0).UTC() maxt := mint.Add(tg.maxEvalTime()) + // Optional floating point compare fuzzing. 
+ var compareFloat64 cmp.Option = cmp.Options{} + if fuzzyCompare { + compareFloat64 = cmp.Comparer(func(x, y float64) bool { + return x == y || math.Nextafter(x, math.Inf(-1)) == y || math.Nextafter(x, math.Inf(1)) == y + }) + } + // Pre-processing some data for testing alerts. // All this preparation is so that we can test alerts as we evaluate the rules. // This avoids storing them in memory, as the number of evals might be high. @@ -374,7 +384,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde sort.Sort(gotAlerts) sort.Sort(expAlerts) - if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal)) { + if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal), compareFloat64) { var testName string if tg.TestGroupName != "" { testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName) @@ -482,7 +492,7 @@ Outer: sort.Slice(gotSamples, func(i, j int) bool { return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0 }) - if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal)) { + if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal), compareFloat64) { errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr, testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples))) } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 7466b222ca..566e0acbc6 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -240,6 +240,29 @@ func TestRulesUnitTestRun(t *testing.T) { ignoreUnknownFields: true, want: 0, }, + { + name: "Test precise floating point comparison expected failure", + args: args{ + files: []string{"./testdata/rules_run_no_fuzzy.yml"}, + }, + want: 1, + }, + { + name: "Test fuzzy floating point comparison correct match", + args: args{ + run: []string{"correct"}, + files: []string{"./testdata/rules_run_fuzzy.yml"}, + }, + want: 0, + }, + { + name: "Test fuzzy floating point comparison wrong match", + args: args{ + run: []string{"wrong"}, + files: []string{"./testdata/rules_run_fuzzy.yml"}, + }, + want: 1, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md index 7fc676a251..ccf1961f48 100644 --- a/docs/configuration/unit_testing_rules.md +++ b/docs/configuration/unit_testing_rules.md @@ -24,6 +24,10 @@ rule_files: [ evaluation_interval: | default = 1m ] +# Setting fuzzy_compare true will very slightly weaken floating point comparisons. +# This will (effectively) ignore differences in the last bit of the mantissa. +[ fuzzy_compare: | default = false ] + # The order in which group names are listed below will be the order of evaluation of # rule groups (at a given evaluation time). The order is guaranteed only for the groups mentioned below. # All the groups need not be mentioned below. @@ -95,20 +99,20 @@ series: # {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}} # Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'. # All properties are optional and default to 0. The order is not important. The following properties are supported: -# - schema (int): +# - schema (int): # Currently valid schema numbers are -4 <= n <= 8. 
They are all for # base-2 bucket schemas, where 1 is a bucket boundary in each case, and # then each power of two is divided into 2^n logarithmic buckets. Or # in other words, each bucket boundary is the previous boundary times # 2^(2^-n). -# - sum (float): +# - sum (float): # The sum of all observations, including the zero bucket. -# - count (non-negative float): +# - count (non-negative float): # The number of observations, including those that are NaN and including the zero bucket. -# - z_bucket (non-negative float): +# - z_bucket (non-negative float): # The sum of all observations in the zero bucket. -# - z_bucket_w (non-negative float): -# The width of the zero bucket. +# - z_bucket_w (non-negative float): +# The width of the zero bucket. # If z_bucket_w > 0, the zero bucket contains all observations -z_bucket_w <= x <= z_bucket_w. # Otherwise, the zero bucket only contains observations that are exactly 0. # - buckets (list of non-negative floats): From 9dcc160049ca13fb692ae57eb568cb0319a973d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 00:14:05 +0000 Subject: [PATCH 11/39] chore(deps): bump github/codeql-action from 3.28.13 to 3.28.16 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.13 to 3.28.16. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/1b549b9259bda1cb5ddde3b41741a82a2d15a841...28deaeda66b76a05916b6923827895f2b14ab387) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.16 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a67dea0320..1007b2e8f9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Initialize CodeQL - uses: github/codeql-action/init@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # v3.28.13 + uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 75db65efe0..c2335a8e46 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@1b549b9259bda1cb5ddde3b41741a82a2d15a841 # tag=v3.28.13 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # tag=v3.28.16 with: sarif_file: results.sarif From 80587855280c2bed19eee8f28c717142513e40b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 00:14:13 +0000 Subject: [PATCH 12/39] chore(deps): bump github.com/ionos-cloud/sdk-go/v6 from 6.3.3 to 6.3.4 Bumps [github.com/ionos-cloud/sdk-go/v6](https://github.com/ionos-cloud/sdk-go) from 6.3.3 to 6.3.4. - [Release notes](https://github.com/ionos-cloud/sdk-go/releases) - [Changelog](https://github.com/ionos-cloud/sdk-go/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/ionos-cloud/sdk-go/compare/v6.3.3...v6.3.4) --- updated-dependencies: - dependency-name: github.com/ionos-cloud/sdk-go/v6 dependency-version: 6.3.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ea4052c9ff..3e5d47d17e 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/hashicorp/consul/api v1.32.0 github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec github.com/hetznercloud/hcloud-go/v2 v2.21.0 - github.com/ionos-cloud/sdk-go/v6 v6.3.3 + github.com/ionos-cloud/sdk-go/v6 v6.3.4 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.18.0 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b diff --git a/go.sum b/go.sum index b9ed9a97c2..3ce5ff4e06 100644 --- a/go.sum +++ b/go.sum @@ -253,8 +253,8 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hetznercloud/hcloud-go/v2 v2.21.0 h1:wUpQT+fgAxIcdMtFvuCJ78ziqc/VARubpOQPQyj4Q84= github.com/hetznercloud/hcloud-go/v2 v2.21.0/go.mod h1:WSM7w+9tT86sJTNcF8a/oHljC3HUmQfcLxYsgx6PpSc= -github.com/ionos-cloud/sdk-go/v6 v6.3.3 h1:q33Sw1ZqsvqDkFaKG53dGk7BCOvPCPbGZpYqsF6tdjw= -github.com/ionos-cloud/sdk-go/v6 v6.3.3/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48= +github.com/ionos-cloud/sdk-go/v6 v6.3.4 h1:jTvGl4LOF8v8OYoEIBNVwbFoqSGAFqn6vGE7sp7/BqQ= +github.com/ionos-cloud/sdk-go/v6 v6.3.4/go.mod h1:wCVwNJ/21W29FWFUv+fNawOTMlFoP1dS3L+ZuztFW48= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= From 13b07cc145f4bdb4945e9b216ef615feb1c02489 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 00:14:28 +0000 Subject: [PATCH 13/39] chore(deps): bump the go-opentelemetry-io group across 2 directories with 5 updates Bumps the go-opentelemetry-io group with 4 updates in the / directory: [go.opentelemetry.io/collector/component](https://github.com/open-telemetry/opentelemetry-collector), [go.opentelemetry.io/collector/consumer](https://github.com/open-telemetry/opentelemetry-collector), [go.opentelemetry.io/collector/processor](https://github.com/open-telemetry/opentelemetry-collector) and [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector). 
Bumps the go-opentelemetry-io group with 2 updates in the /documentation/examples/remote_storage directory: [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) and [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector). Updates `go.opentelemetry.io/collector/component` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.30.0...pdata/v1.31.0) Updates `go.opentelemetry.io/collector/consumer` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.30.0...pdata/v1.31.0) Updates `go.opentelemetry.io/collector/pdata` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.30.0...pdata/v1.31.0) Updates `go.opentelemetry.io/collector/processor` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.30.0...pdata/v1.31.0) Updates `go.opentelemetry.io/collector/semconv` from 0.124.0 to 0.125.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.124.0...v0.125.0) Updates `go.opentelemetry.io/collector/pdata` from 1.30.0 to 1.31.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.30.0...pdata/v1.31.0) Updates `go.opentelemetry.io/collector/semconv` from 0.124.0 to 0.125.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.124.0...v0.125.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/component dependency-version: 1.31.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/consumer dependency-version: 1.31.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/pdata dependency-version: 1.31.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/processor dependency-version: 
1.31.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-version: 0.125.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/pdata dependency-version: 1.31.0 dependency-type: indirect update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-version: 0.125.0 dependency-type: indirect update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... Signed-off-by: dependabot[bot] --- documentation/examples/remote_storage/go.mod | 18 +++---- documentation/examples/remote_storage/go.sum | 52 ++++++++++---------- go.mod | 16 +++--- go.sum | 44 ++++++++--------- 4 files changed, 65 insertions(+), 65 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index af0afd84de..6665b51fe5 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -51,22 +51,22 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/pdata v1.30.0 // indirect - go.opentelemetry.io/collector/semconv v0.124.0 // indirect + go.opentelemetry.io/collector/pdata v1.31.0 // indirect + go.opentelemetry.io/collector/semconv v0.125.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect go.opentelemetry.io/otel v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.26.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/text v0.24.0 // indirect golang.org/x/time v0.7.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/grpc v1.71.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect + google.golang.org/grpc v1.72.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index 1a7d8a6c83..f581fc1a6c 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -41,8 +41,8 @@ github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvF github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q= -github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod 
h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -318,10 +318,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/pdata v1.30.0 h1:j3jyq9um436r6WzWySzexP2nLnFdmL5uVBYAlyr9nDM= -go.opentelemetry.io/collector/pdata v1.30.0/go.mod h1:0Bxu1ktuj4wE7PIASNSvd0SdBscQ1PLtYasymJ13/Cs= -go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA= -go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= +go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU= +go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk= +go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s= +go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= @@ -344,8 +344,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -365,20 +365,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.38.0 
h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -394,17 +394,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -419,12 +419,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= -google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI= -google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/go.mod b/go.mod index ea4052c9ff..c463064707 100644 --- a/go.mod +++ b/go.mod @@ -58,11 +58,11 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.10.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/component v1.30.0 - go.opentelemetry.io/collector/consumer v1.30.0 - go.opentelemetry.io/collector/pdata v1.30.0 - go.opentelemetry.io/collector/processor v1.30.0 - go.opentelemetry.io/collector/semconv v0.124.0 + go.opentelemetry.io/collector/component v1.31.0 + go.opentelemetry.io/collector/consumer v1.31.0 + go.opentelemetry.io/collector/pdata v1.31.0 + go.opentelemetry.io/collector/processor v1.31.0 + go.opentelemetry.io/collector/semconv v0.125.0 
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 go.opentelemetry.io/otel v1.35.0 @@ -97,8 +97,8 @@ require ( github.com/hashicorp/go-version v1.7.0 // indirect github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect - go.opentelemetry.io/collector/featuregate v1.30.0 // indirect - go.opentelemetry.io/collector/internal/telemetry v0.124.0 // indirect + go.opentelemetry.io/collector/featuregate v1.31.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.125.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect go.opentelemetry.io/otel/log v0.11.0 // indirect ) @@ -199,7 +199,7 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/collector/confmap v1.30.0 // indirect go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect - go.opentelemetry.io/collector/pipeline v0.124.0 // indirect + go.opentelemetry.io/collector/pipeline v0.125.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.37.0 // indirect diff --git a/go.sum b/go.sum index b9ed9a97c2..88246f2b06 100644 --- a/go.sum +++ b/go.sum @@ -488,8 +488,8 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/component v1.30.0 h1:HXjqBHaQ47/EEuWdnkjr4Y3kRWvmyWIDvqa1Q262Fls= -go.opentelemetry.io/collector/component v1.30.0/go.mod h1:vfM9kN+BM6oHBXWibquiprz8CVawxd4/aYy3nbhme3E= +go.opentelemetry.io/collector/component v1.31.0 h1:9LzU8X1RhV3h8/QsAoTX23aFUfoJ3EUc9O/vK+hFpSI= +go.opentelemetry.io/collector/component v1.31.0/go.mod h1:JbZl/KywXJxpUXPbt96qlEXJSym1zQ2hauMxYMuvlxM= go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4= go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38= go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw= @@ -498,32 +498,32 @@ go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7 go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA= go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8= go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0= -go.opentelemetry.io/collector/consumer v1.30.0 h1:Nn6kFTH+EJbv13E0W+sNvWrTgbiFCRv8f6DaA2F1DQs= -go.opentelemetry.io/collector/consumer v1.30.0/go.mod h1:edRyfk61ugdhCQ93PBLRZfYMVWjdMPpKP8z5QLyESf0= -go.opentelemetry.io/collector/consumer/consumertest v0.124.0 h1:2arChG4RPrHW3lfVWlK/KDF7Y7qkUm/YAiBXh8oTue0= -go.opentelemetry.io/collector/consumer/consumertest v0.124.0/go.mod h1:Hlu+EXbINHxVAyIT1baKO2d0j5odR3fLlLAiaP+JqQg= -go.opentelemetry.io/collector/consumer/xconsumer v0.124.0 h1:/cut96EWVNoz6lIeGI9+EzS6UClMtnZkx5YIpkD0Xe0= -go.opentelemetry.io/collector/consumer/xconsumer v0.124.0/go.mod h1:fHH/MpzFCRNk/4foiYE6BoXQCAMf5sJTO35uvzVrrd4= -go.opentelemetry.io/collector/featuregate v1.30.0 
h1:mx7+iP/FQnY7KO8qw/xE3Qd1MQkWcU8VgcqLNrJ8EU8= -go.opentelemetry.io/collector/featuregate v1.30.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= -go.opentelemetry.io/collector/internal/telemetry v0.124.0 h1:kzd1/ZYhLj4bt2pDB529mL4rIRrRacemXodFNxfhdWk= -go.opentelemetry.io/collector/internal/telemetry v0.124.0/go.mod h1:ZjXjqV0dJ+6D4XGhTOxg/WHjnhdmXsmwmUSgALea66Y= -go.opentelemetry.io/collector/pdata v1.30.0 h1:j3jyq9um436r6WzWySzexP2nLnFdmL5uVBYAlyr9nDM= -go.opentelemetry.io/collector/pdata v1.30.0/go.mod h1:0Bxu1ktuj4wE7PIASNSvd0SdBscQ1PLtYasymJ13/Cs= -go.opentelemetry.io/collector/pdata/pprofile v0.124.0 h1:ZjL9wKqzP4BHj0/F1jfGxs1Va8B7xmYayipZeNVoWJE= -go.opentelemetry.io/collector/pdata/pprofile v0.124.0/go.mod h1:1EN3Gw5LSI4fSVma/Yfv/6nqeuYgRTm1/kmG5nE5Oyo= +go.opentelemetry.io/collector/consumer v1.31.0 h1:L+y66ywxLHnAxnUxv0JDwUf5bFj53kMxCCyEfRKlM7s= +go.opentelemetry.io/collector/consumer v1.31.0/go.mod h1:rPsqy5ni+c6xNMUkOChleZYO/nInVY6eaBNZ1FmWJVk= +go.opentelemetry.io/collector/consumer/consumertest v0.125.0 h1:TUkxomGS4DAtjBvcWQd2UY4FDLLEKMQD6iOIDUr/5dM= +go.opentelemetry.io/collector/consumer/consumertest v0.125.0/go.mod h1:vkHf3y85cFLDHARO/cTREVjLjOPAV+cQg7lkC44DWOY= +go.opentelemetry.io/collector/consumer/xconsumer v0.125.0 h1:oTreUlk1KpMSWwuHFnstW+orrjGTyvs2xd3o/Dpy+hI= +go.opentelemetry.io/collector/consumer/xconsumer v0.125.0/go.mod h1:FX0G37r0W+wXRgxxFtwEJ4rlsCB+p0cIaxtU3C4hskw= +go.opentelemetry.io/collector/featuregate v1.31.0 h1:20q7plPQZwmAiaYAa6l1m/i2qDITZuWlhjr4EkmeQls= +go.opentelemetry.io/collector/featuregate v1.31.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.125.0 h1:6lcGOxw3dAg7LfXTKdN8ZjR+l7KvzLdEiPMhhLwG4r4= +go.opentelemetry.io/collector/internal/telemetry v0.125.0/go.mod h1:5GyFslLqjZgq1DZTtFiluxYhhXrCofHgOOOybodDPGE= +go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4Isv+BLTBqU= +go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk= +go.opentelemetry.io/collector/pdata/pprofile v0.125.0 h1:Qqlx8w1HpiYZ9RQqjmMQIysI0cHNO1nh3E/fCTeFysA= +go.opentelemetry.io/collector/pdata/pprofile v0.125.0/go.mod h1:p/yK023VxAp8hm27/1G5DPTcMIpnJy3cHGAFUQZGyaQ= go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY= go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o= -go.opentelemetry.io/collector/pipeline v0.124.0 h1:hKvhDyH2GPnNO8LGL34ugf36sY7EOXPjBvlrvBhsOdw= -go.opentelemetry.io/collector/pipeline v0.124.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= -go.opentelemetry.io/collector/processor v1.30.0 h1:dxmu+sO6MzQydyrf2CON5Hm1KU7yV4ofH1stmreUtPk= -go.opentelemetry.io/collector/processor v1.30.0/go.mod h1:DjXAgelT8rfIWCTJP5kiPpxPqz4JLE1mJwsE2kJMTk8= +go.opentelemetry.io/collector/pipeline v0.125.0 h1:oitBgcAFqntDB4ihQJUHJSQ8IHqKFpPkaTVbTYdIUzM= +go.opentelemetry.io/collector/pipeline v0.125.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.31.0 h1:+u7sBUpnCBsHYoALp4hfr9VEjLHHYa4uKENGITe0K9Q= +go.opentelemetry.io/collector/processor v1.31.0/go.mod h1:5hDYJ7/hTdfd2tF2Rj5Hs6+mfyFz2O7CaPzVvW1qHQc= go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE= go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0= go.opentelemetry.io/collector/processor/xprocessor 
v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w= go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI= -go.opentelemetry.io/collector/semconv v0.124.0 h1:YTdo3UFwNyDQCh9DiSm2rbzAgBuwn/9dNZ0rv454goA= -go.opentelemetry.io/collector/semconv v0.124.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= +go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s= +go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU= go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM= From 874fce920d4102538ebefb886c1c07c06a5847f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 00:14:48 +0000 Subject: [PATCH 14/39] chore(deps): bump github.com/digitalocean/godo from 1.144.0 to 1.145.0 Bumps [github.com/digitalocean/godo](https://github.com/digitalocean/godo) from 1.144.0 to 1.145.0. - [Release notes](https://github.com/digitalocean/godo/releases) - [Changelog](https://github.com/digitalocean/godo/blob/main/CHANGELOG.md) - [Commits](https://github.com/digitalocean/godo/compare/v1.144.0...v1.145.0) --- updated-dependencies: - dependency-name: github.com/digitalocean/godo dependency-version: 1.145.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ea4052c9ff..8cca9bec93 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.144.0 + github.com/digitalocean/godo v1.145.0 github.com/docker/docker v28.1.1+incompatible github.com/edsrzf/mmap-go v1.2.0 github.com/envoyproxy/go-control-plane/envoy v1.32.4 diff --git a/go.sum b/go.sum index b9ed9a97c2..2881c27a8f 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.144.0 h1:rDCsmpwcDe5egFQ3Ae45HTde685/GzX037mWRMPufW0= -github.com/digitalocean/godo v1.144.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= +github.com/digitalocean/godo v1.145.0 h1:xBhWr+vCBy7GsexCUsWC+dKhPAWBMRLazavvXwyPBp8= +github.com/digitalocean/godo v1.145.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= From ae217439068fe28335dbb9c0f3afb6d54795b8d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> 
Date: Fri, 2 May 2025 00:15:03 +0000 Subject: [PATCH 15/39] chore(deps): bump google.golang.org/api from 0.230.0 to 0.231.0 Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.230.0 to 0.231.0. - [Release notes](https://github.com/googleapis/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.230.0...v0.231.0) --- updated-dependencies: - dependency-name: google.golang.org/api dependency-version: 0.231.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index ea4052c9ff..1d125c65f9 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( golang.org/x/sync v0.13.0 golang.org/x/sys v0.32.0 golang.org/x/text v0.24.0 - google.golang.org/api v0.230.0 + google.golang.org/api v0.231.0 google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb google.golang.org/grpc v1.72.0 google.golang.org/protobuf v1.36.6 @@ -104,7 +104,7 @@ require ( ) require ( - cloud.google.com/go/auth v0.16.0 // indirect + cloud.google.com/go/auth v0.16.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect @@ -209,7 +209,7 @@ require ( golang.org/x/term v0.31.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.32.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index b9ed9a97c2..4c55b215d2 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU= -cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= +cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= @@ -645,12 +645,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM= -google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ= +google.golang.org/api v0.231.0 h1:LbUD5FUl0C4qwia2bjXhCMH65yz1MLPzA/0OYEsYY7Q= +google.golang.org/api v0.231.0/go.mod h1:H52180fPI/QQlUc0F4xWfGZILdv09GCWKt2bcsn164A= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb 
h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197 h1:29cjnHVylHwTzH66WfFZqgSQgnxzvWE+jvBwpZCLRxY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250425173222-7b384671a197/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= From 1a190d36933e49b95438657f7241d2a56ae79012 Mon Sep 17 00:00:00 2001 From: machine424 Date: Thu, 1 May 2025 17:11:16 +0200 Subject: [PATCH 16/39] chore(cmd): Add info log for automemlimit config Signed-off-by: machine424 --- cmd/prometheus/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 4c3ede362e..0e547deaf9 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -671,6 +671,7 @@ func main() { memlimit.FromSystem, ), ), + memlimit.WithLogger(logger.With("component", "automemlimit")), ); err != nil { logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) } From e7e3ab282431de827ee7d03d27d056551a134e6a Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sat, 3 May 2025 19:05:13 +0200 Subject: [PATCH 17/39] Fix linting issues found by golangci-lint v2.0.2 (#16368) * Fix linting issues found by golangci-lint v2.0.2 --------- Signed-off-by: Arve Knudsen --- .golangci.yml | 22 ------- cmd/prometheus/main_test.go | 4 +- cmd/prometheus/query_log_test.go | 2 +- cmd/promtool/main_test.go | 12 ++-- cmd/promtool/unittest.go | 8 +-- discovery/aws/lightsail.go | 1 + discovery/consul/consul_test.go | 4 +- discovery/kubernetes/pod.go | 2 +- discovery/linode/linode.go | 2 +- discovery/manager_test.go | 20 +++---- discovery/marathon/marathon_test.go | 38 ++++++------ discovery/moby/docker.go | 5 +- discovery/scaleway/instance.go | 5 +- discovery/uyuni/uyuni.go | 4 ++ .../remote_storage_adapter/main.go | 2 +- .../opentsdb/tagvalue.go | 2 +- model/histogram/float_histogram.go | 2 +- model/labels/labels_test.go | 2 +- prompb/io/prometheus/client/decoder.go | 60 +++++++++---------- prompb/rwcommon/codec_test.go | 8 +-- promql/bench_test.go | 10 ++-- promql/engine.go | 2 +- promql/parser/lex.go | 6 +- rules/manager_test.go | 14 ++--- scrape/scrape_test.go | 22 +++---- scrape/target.go | 2 +- storage/merge.go | 12 ++-- .../prometheusremotewrite/helper.go | 2 +- .../prometheusremotewrite/metrics_to_prw.go | 4 +- storage/remote/queue_manager.go | 6 +- storage/remote/read.go | 2 +- storage/remote/read_handler_test.go | 2 +- storage/series_test.go | 8 +-- tsdb/agent/db_test.go | 2 +- tsdb/chunks/head_chunks_test.go | 14 ++--- tsdb/compact_test.go | 2 +- tsdb/db_test.go | 20 +++---- tsdb/head_test.go | 4 +- tsdb/index/index_test.go | 2 +- tsdb/ooo_head_read_test.go | 4 +- tsdb/ooo_head_test.go | 4 +- tsdb/querier.go | 2 +- tsdb/querier_test.go | 2 +- tsdb/testutil.go | 2 +- util/annotations/annotations.go | 3 +- util/documentcli/documentcli.go | 14 ++--- util/stats/query_stats.go | 
4 +- util/strutil/strconv.go | 6 +- web/web.go | 2 +- web/web_test.go | 2 +- 50 files changed, 178 insertions(+), 208 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index dd87e3fedb..37b488f812 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -164,31 +164,9 @@ linters: - name: unused-parameter - name: var-declaration - name: var-naming - staticcheck: - checks: - - all # Enable all checks. - # FIXME: We should enable this check once we have fixed all the issues. - - -QF1001 - - -QF1002 - - -QF1003 - - -QF1006 - - -QF1007 - - -QF1008 - - -QF1009 - - -QF1010 - - -QF1012 - - -ST1000 - - -ST1003 - - -ST1005 - - -ST1012 - - -ST1016 - - -ST1020 testifylint: disable: - - empty # FIXME - - equal-values # FIXME - float-compare - - formatter # FIXME - go-require - len # FIXME - useless-assert # FIXME: wait for golangci-lint > v2.0.2 diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 0d0ab56eb4..5bb72dd2c2 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -268,7 +268,7 @@ func TestWALSegmentSizeBounds(t *testing.T) { go func() { done <- prom.Wait() }() select { case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) + t.Fatalf("prometheus should be still running: %v", err) case <-time.After(startupTime): prom.Process.Kill() <-done @@ -332,7 +332,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) { go func() { done <- prom.Wait() }() select { case err := <-done: - require.Fail(t, "prometheus should be still running: %v", err) + t.Fatalf("prometheus should be still running: %v", err) case <-time.After(startupTime): prom.Process.Kill() <-done diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 25abf5e965..c63be02d4d 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -88,7 +88,7 @@ func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) { _, err = p.configFile.Seek(0, 0) require.NoError(t, err) if queryLogFile != "" { - _, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile))) + _, err = fmt.Fprintf(p.configFile, "global:\n query_log_file: %s\n", queryLogFile) require.NoError(t, err) } _, err = p.configFile.Write([]byte(p.configuration())) diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index d1390f0d67..f922d18c4e 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -510,7 +510,7 @@ func TestCheckRules(t *testing.T) { os.Stdin = r exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false)) - require.Equal(t, successExitCode, exitCode, "") + require.Equal(t, successExitCode, exitCode) }) t.Run("rules-bad", func(t *testing.T) { @@ -532,7 +532,7 @@ func TestCheckRules(t *testing.T) { os.Stdin = r exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false)) - require.Equal(t, failureExitCode, exitCode, "") + require.Equal(t, failureExitCode, exitCode) }) t.Run("rules-lint-fatal", func(t *testing.T) { @@ -554,7 +554,7 @@ func TestCheckRules(t *testing.T) { os.Stdin = r exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false)) - require.Equal(t, lintErrExitCode, exitCode, "") + require.Equal(t, lintErrExitCode, exitCode) }) } @@ -572,19 +572,19 @@ func TestCheckRulesWithRuleFiles(t *testing.T) { t.Run("rules-good", func(t *testing.T) { t.Parallel() exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml") - require.Equal(t, 
successExitCode, exitCode, "") + require.Equal(t, successExitCode, exitCode) }) t.Run("rules-bad", func(t *testing.T) { t.Parallel() exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml") - require.Equal(t, failureExitCode, exitCode, "") + require.Equal(t, failureExitCode, exitCode) }) t.Run("rules-lint-fatal", func(t *testing.T) { t.Parallel() exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml") - require.Equal(t, lintErrExitCode, exitCode, "") + require.Equal(t, lintErrExitCode, exitCode) }) } diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 9bc1af1f61..4910a0b1a6 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -321,12 +321,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde return errs } - for { - if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) && - time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) { - break - } - + for curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) && + time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint) { // We need to check alerts for this time. // If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval' // then we compare alerts with the Eval at `ts`. diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index fb249b8256..ff1059ede0 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -115,6 +115,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err region, err := metadata.Region() if err != nil { + //nolint:staticcheck // Capitalized first word. return errors.New("Lightsail SD configuration requires a region") } c.Region = region diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index ba3f63ccb5..ea896ce31b 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -425,14 +425,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { d := newDiscovery(t, config) // Should be empty if not initialized. - require.Equal(t, "", d.clientDatacenter) + require.Empty(t, d.clientDatacenter) err = d.getDatacenter() // An error should be returned. require.EqualError(t, err, tc.errMessage) // Should still be empty. 
- require.Equal(t, "", d.clientDatacenter) + require.Empty(t, d.clientDatacenter) } } diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 8704a66239..169c6a78a1 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -219,7 +219,7 @@ func podLabels(pod *apiv1.Pod) model.LabelSet { podPhaseLabel: lv(string(pod.Status.Phase)), podNodeNameLabel: lv(pod.Spec.NodeName), podHostIPLabel: lv(pod.Status.HostIP), - podUID: lv(string(pod.ObjectMeta.UID)), + podUID: lv(string(pod.UID)), } addObjectMetaLabels(ls, pod.ObjectMeta, RolePod) diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 453901bc05..033025f840 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -194,7 +194,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { events, err := d.client.ListEvents(ctx, &eventsOpts) if err != nil { var e *linodego.Error - if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) { + if !errors.As(err, &e) || e.Code != http.StatusUnauthorized { return nil, err } // If we get a 401, the token doesn't have `events:read_only` scope. diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 0ff82d5415..1dd10baf47 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -695,7 +695,7 @@ func TestTargetUpdatesOrder(t *testing.T) { for x := 0; x < totalUpdatesCount; x++ { select { case <-ctx.Done(): - require.FailNow(t, "%d: no update arrived within the timeout limit", x) + t.Fatalf("%d: no update arrived within the timeout limit", x) case tgs := <-provUpdates: discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs) for _, got := range discoveryManager.allGroups() { @@ -769,12 +769,10 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou } } } - if match != present { - msg := "" - if !present { - msg = "not" - } - require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets) + if present { + require.Truef(t, match, "%q must be present in Targets labels: %q", label, mergedTargets) + } else { + require.Falsef(t, match, "%q must be absent in Targets labels: %q", label, mergedTargets) } } @@ -1091,9 +1089,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { targetGroups, ok := discoveryManager.targets[p] require.True(t, ok, "'%v' should be present in targets", p) // Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436. 
- require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p) + require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p) require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.") - require.Empty(t, syncedTargets["prometheus"], 0) + require.Empty(t, syncedTargets["prometheus"]) } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { @@ -1373,10 +1371,10 @@ func TestCoordinationWithReceiver(t *testing.T) { time.Sleep(expected.delay) select { case <-ctx.Done(): - require.FailNow(t, "step %d: no update received in the expected timeframe", i) + t.Fatalf("step %d: no update received in the expected timeframe", i) case tgs, ok := <-mgr.SyncCh(): require.True(t, ok, "step %d: discovery manager channel is closed", i) - require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i) + require.Len(t, tgs, len(expected.tgs), "step %d: targets mismatch", i) for k := range expected.tgs { _, ok := tgs[k] diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go index 61d8ef900d..18ec7bdf19 100644 --- a/discovery/marathon/marathon_test.go +++ b/discovery/marathon/marathon_test.go @@ -202,7 +202,7 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) { tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port: %s", tgt[model.AddressLabel]) } @@ -300,9 +300,9 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] @@ -354,12 +354,12 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") require.Equal(t, 
"yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } @@ -401,13 +401,13 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList { @@ -458,12 +458,12 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList { @@ -514,12 +514,12 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), 
"Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList { @@ -574,10 +574,10 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) { tgt := tg.Targets[0] require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.") require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.") tgt = tg.Targets[1] require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.") - require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") - require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.") + require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.") } diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 53a8b2e135..2b640dea82 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -235,10 +235,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode) if len(networks) == 0 { // Try to lookup shared networks - for { - if !containerNetworkMode.IsContainer() { - break - } + for containerNetworkMode.IsContainer() { tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()] if !exists { break diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index 291809be68..162a75e407 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -182,9 +182,10 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, var ipv6Addresses []string for _, ip := range server.PublicIPs { - if ip.Family == instance.ServerIPIPFamilyInet { + switch ip.Family { + case instance.ServerIPIPFamilyInet: ipv4Addresses = append(ipv4Addresses, ip.Address.String()) - } else if ip.Family == instance.ServerIPIPFamilyInet6 { + case instance.ServerIPIPFamilyInet6: ipv6Addresses = 
append(ipv6Addresses, ip.Address.String()) } } diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index 11b1888db4..a7745eed46 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -141,18 +141,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.Server == "" { + //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires server host") } _, err = url.Parse(c.Server) if err != nil { + //nolint:staticcheck // Capitalized first word. return fmt.Errorf("Uyuni Server URL is not valid: %w", err) } if c.Username == "" { + //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires a username") } if c.Password == "" { + //nolint:staticcheck // Capitalized first word. return errors.New("Uyuni SD configuration requires a password") } return c.HTTPClientConfig.Validate() diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index 9ea9b8e5f9..ffcbb5385a 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -145,7 +145,7 @@ func parseFlags() *config { _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err)) + fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s", err) a.Usage(os.Args[1:]) os.Exit(2) } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go index 99cef0b242..6a691778af 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go @@ -78,7 +78,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) { case b == ':': result.WriteString("_.") default: - result.WriteString(fmt.Sprintf("_%X", b)) + fmt.Fprintf(result, "_%X", b) } } result.WriteByte('"') diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index e5519a56d6..0a2b43951e 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -1016,7 +1016,7 @@ type floatBucketIterator struct { func (i *floatBucketIterator) At() Bucket[float64] { // Need to use i.targetSchema rather than i.baseBucketIterator.schema. 
- return i.baseBucketIterator.at(i.targetSchema) + return i.at(i.targetSchema) } func (i *floatBucketIterator) Next() bool { diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index b7ba71b553..2abc322699 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -513,7 +513,7 @@ func TestLabels_Has(t *testing.T) { } func TestLabels_Get(t *testing.T) { - require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo")) + require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo")) require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } diff --git a/prompb/io/prometheus/client/decoder.go b/prompb/io/prometheus/client/decoder.go index 0d62f1f7cf..2f11d278ce 100644 --- a/prompb/io/prometheus/client/decoder.go +++ b/prompb/io/prometheus/client/decoder.go @@ -81,7 +81,7 @@ func (m *MetricStreamingDecoder) NextMetricFamily() error { m.mfData = b[varIntLength:totalLength] m.inPos += totalLength - return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData) + return m.unmarshalWithoutMetrics(m, m.mfData) } // resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory. @@ -98,7 +98,7 @@ func (m *MetricStreamingDecoder) NextMetric() error { m.resetMetric() m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end] - if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil { + if err := m.unmarshalWithoutLabels(m, m.mData); err != nil { return err } m.metricIndex++ @@ -111,37 +111,37 @@ func (m *MetricStreamingDecoder) resetMetric() { m.TimestampMs = 0 // TODO(bwplotka): Autogenerate reset functions. - if m.Metric.Counter != nil { - m.Metric.Counter.Value = 0 - m.Metric.Counter.CreatedTimestamp = nil - m.Metric.Counter.Exemplar = nil + if m.Counter != nil { + m.Counter.Value = 0 + m.Counter.CreatedTimestamp = nil + m.Counter.Exemplar = nil } - if m.Metric.Gauge != nil { - m.Metric.Gauge.Value = 0 + if m.Gauge != nil { + m.Gauge.Value = 0 } - if m.Metric.Histogram != nil { - m.Metric.Histogram.SampleCount = 0 - m.Metric.Histogram.SampleCountFloat = 0 - m.Metric.Histogram.SampleSum = 0 - m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0] - m.Metric.Histogram.CreatedTimestamp = nil - m.Metric.Histogram.Schema = 0 - m.Metric.Histogram.ZeroThreshold = 0 - m.Metric.Histogram.ZeroCount = 0 - m.Metric.Histogram.ZeroCountFloat = 0 - m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0] - m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0] - m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0] - m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0] - m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0] - m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0] - m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0] + if m.Histogram != nil { + m.Histogram.SampleCount = 0 + m.Histogram.SampleCountFloat = 0 + m.Histogram.SampleSum = 0 + m.Histogram.Bucket = m.Histogram.Bucket[:0] + m.Histogram.CreatedTimestamp = nil + m.Histogram.Schema = 0 + m.Histogram.ZeroThreshold = 0 + m.Histogram.ZeroCount = 0 + m.Histogram.ZeroCountFloat = 0 + m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0] + m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0] + m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0] + m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0] + 
m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0] + m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0] + m.Histogram.Exemplars = m.Histogram.Exemplars[:0] } - if m.Metric.Summary != nil { - m.Metric.Summary.SampleCount = 0 - m.Metric.Summary.SampleSum = 0 - m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0] - m.Metric.Summary.CreatedTimestamp = nil + if m.Summary != nil { + m.Summary.SampleCount = 0 + m.Summary.SampleSum = 0 + m.Summary.Quantile = m.Summary.Quantile[:0] + m.Summary.CreatedTimestamp = nil } } diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go index 2ab95e0d19..b91355c51c 100644 --- a/prompb/rwcommon/codec_test.go +++ b/prompb/rwcommon/codec_test.go @@ -135,12 +135,12 @@ func TestToMetadata(t *testing.T) { func TestToHistogram_Empty(t *testing.T) { t.Run("v1", func(t *testing.T) { - require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "") - require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "") + require.NotNil(t, prompb.Histogram{}.ToIntHistogram()) + require.NotNil(t, prompb.Histogram{}.ToFloatHistogram()) }) t.Run("v2", func(t *testing.T) { - require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "") - require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "") + require.NotNil(t, writev2.Histogram{}.ToIntHistogram()) + require.NotNil(t, writev2.Histogram{}.ToFloatHistogram()) }) } diff --git a/promql/bench_test.go b/promql/bench_test.go index 943baceecb..9741a02102 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -89,8 +89,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in } } - stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series. - stor.DB.Compact(ctx) + stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series. + stor.Compact(ctx) return nil } @@ -269,7 +269,7 @@ func rangeQueryCases() []benchCase { func BenchmarkRangeQuery(b *testing.B) { stor := teststorage.New(b) - stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings. + stor.DisableCompactions() // Don't want auto-compaction disrupting timings. defer stor.Close() opts := promql.EngineOpts{ Logger: nil, @@ -498,8 +498,8 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage require.NoError(tb, a.Commit()) } - stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series. - stor.DB.Compact(ctx) + stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series. + stor.Compact(ctx) } func generateNativeHistogramSeries(app storage.Appender, numSeries int) error { diff --git a/promql/engine.go b/promql/engine.go index f1829efdd8..d5a192f8ba 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -731,7 +731,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr) evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval) // Instant evaluation. This is executed as a range evaluation with one step. 
- if s.Start == s.End && s.Interval == 0 { + if s.Start.Equal(s.End) && s.Interval == 0 { start := timeMilliseconds(s.Start) evaluator := &evaluator{ startTimestamp: start, diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 66522f59da..3bb74b4ad2 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -674,10 +674,10 @@ func lexInsideBraces(l *Lexer) stateFn { l.backup() l.emit(EQL) case r == '!': - switch nr := l.next(); { - case nr == '~': + switch nr := l.next(); nr { + case '~': l.emit(NEQ_REGEX) - case nr == '=': + case '=': l.emit(NEQ) default: return l.errorf("unexpected character after '!' inside braces: %q", nr) diff --git a/rules/manager_test.go b/rules/manager_test.go index 46a87787ce..efd7a8b23c 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -180,7 +180,7 @@ func TestAlertingRule(t *testing.T) { for i := range test.result { test.result[i].T = timestamp.FromTime(evalTime) } - require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res)) + require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res)) sort.Slice(filteredRes, func(i, j int) bool { return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0 @@ -188,7 +188,7 @@ func TestAlertingRule(t *testing.T) { prom_testutil.RequireEqual(t, test.result, filteredRes) for _, aa := range rule.ActiveAlerts() { - require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) + require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) } } } @@ -333,7 +333,7 @@ func TestForStateAddSamples(t *testing.T) { test.result[i].F = forState } } - require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res)) + require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. 
%d)", i, len(test.result), len(res)) sort.Slice(filteredRes, func(i, j int) bool { return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0 @@ -341,7 +341,7 @@ func TestForStateAddSamples(t *testing.T) { prom_testutil.RequireEqual(t, test.result, filteredRes) for _, aa := range rule.ActiveAlerts() { - require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) + require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) } } }) @@ -489,7 +489,7 @@ func TestForStateRestore(t *testing.T) { got := newRule.ActiveAlerts() for _, aa := range got { - require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) + require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) } sort.Slice(got, func(i, j int) bool { return labels.Compare(got[i].Labels, got[j].Labels) < 0 @@ -513,7 +513,7 @@ func TestForStateRestore(t *testing.T) { } default: exp := tt.expectedAlerts - require.Equal(t, len(exp), len(got)) + require.Len(t, got, len(exp)) sortAlerts(exp) sortAlerts(got) for i, e := range exp { @@ -2442,7 +2442,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) { wg.Wait() // Synchronous queries also count towards inflight, so at most we can have maxConcurrency+$groupCount inflight evaluations. - require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) + require.Equal(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) } func TestUpdateWhenStopped(t *testing.T) { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 3ddb767356..699a3864b2 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -1157,7 +1157,7 @@ func TestScrapeLoopRun(t *testing.T) { case <-time.After(5 * time.Second): require.FailNow(t, "Cancellation during initial offset failed.") case err := <-errc: - require.FailNow(t, "Unexpected error: %s", err) + require.FailNow(t, "Unexpected error", "err: %s", err) } // The provided timeout must cause cancellation of the context passed down to the @@ -1200,7 +1200,7 @@ func TestScrapeLoopRun(t *testing.T) { case <-signal: // Loop terminated as expected. 
case err := <-errc: - require.FailNow(t, "Unexpected error: %s", err) + require.FailNow(t, "Unexpected error", "err: %s", err) case <-time.After(3 * time.Second): require.FailNow(t, "Loop did not terminate on context cancellation") } @@ -1309,14 +1309,14 @@ test_metric_total 1 md, ok = cache.GetMetadata("test_metric_no_help") require.True(t, ok, "expected metadata to be present") require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type") - require.Equal(t, "", md.Help) - require.Equal(t, "", md.Unit) + require.Empty(t, md.Help) + require.Empty(t, md.Unit) md, ok = cache.GetMetadata("test_metric_no_type") require.True(t, ok, "expected metadata to be present") require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type") require.Equal(t, "other help text", md.Help) - require.Equal(t, "", md.Unit) + require.Empty(t, md.Unit) } func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) { @@ -1567,7 +1567,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) { if numScrapes == cue { action(sl) } - w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes))) + fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes) return nil } sl.run(nil) @@ -4259,7 +4259,7 @@ test_summary_count 199 foundLeValues[v] = true } - require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected") + require.Len(t, foundLeValues, len(expectedValues), "number of label values not as expected") for _, v := range expectedValues { require.Contains(t, foundLeValues, v, "label value not found") } @@ -4568,7 +4568,7 @@ metric: < foundLeValues[v] = true } - require.Equal(t, len(expectedValues), len(foundLeValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues) + require.Len(t, foundLeValues, len(expectedValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues) for _, v := range expectedValues { require.Contains(t, foundLeValues, v, "label value not found") } @@ -4817,7 +4817,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t * switch numScrapes { case 1: - w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond)))) + fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond)) return nil case 5: cancel() @@ -4867,7 +4867,7 @@ func TestScrapeLoopCompression(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header") - fmt.Fprint(w, metricsText) + fmt.Fprint(w, string(metricsText)) close(scraped) })) defer ts.Close() @@ -5164,7 +5164,7 @@ scrape_configs: s := teststorage.New(t) defer s.Close() - s.DB.EnableNativeHistograms() + s.EnableNativeHistograms() reg := prometheus.NewRegistry() mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) diff --git a/scrape/target.go b/scrape/target.go index 4f576504f0..30b47976a3 100644 --- a/scrape/target.go +++ b/scrape/target.go @@ -144,7 +144,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) { func (t *Target) hash() uint64 { h := fnv.New64a() - h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) + fmt.Fprintf(h, "%016d", t.labels.Hash()) h.Write([]byte(t.URL().String())) return h.Sum64() diff --git a/storage/merge.go b/storage/merge.go 
index bc70ceea55..9b3bcee580 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -64,10 +64,8 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer queriers = append(queriers, newSecondaryQuerierFrom(q)) } - concurrentSelect := false - if len(secondaries) > 0 { - concurrentSelect = true - } + concurrentSelect := len(secondaries) > 0 + return &querierAdapter{&mergeGenericQuerier{ mergeFn: (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge, queriers: queriers, @@ -111,10 +109,8 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica queriers = append(queriers, newSecondaryQuerierFromChunk(q)) } - concurrentSelect := false - if len(secondaries) > 0 { - concurrentSelect = true - } + concurrentSelect := len(secondaries) > 0 + return &chunkQuerierAdapter{&mergeGenericQuerier{ mergeFn: (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge, queriers: queriers, diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 09be335a8b..527a0c879f 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -210,7 +210,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.") } // internal labels should be maintained - if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { + if !settings.AllowUTF8 && (len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__") { name = otlptranslator.NormalizeLabel(name) } l[name] = extras[i+1] diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 79d127bb80..3d0285a185 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -102,8 +102,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric // Cumulative temporality is always valid. // Delta temporality is also valid if AllowDeltaTemporality is true. // All other temporality values are invalid. - !(temporality == pmetric.AggregationTemporalityCumulative || - (settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) { + (temporality != pmetric.AggregationTemporalityCumulative && + (!settings.AllowDeltaTemporality || temporality != pmetric.AggregationTemporalityDelta)) { errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name())) continue } diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 87567fb9c6..db602b8dc3 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -515,10 +515,8 @@ func NewQueueManager( compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use. 
} - walMetadata := false - if t.protoMsg != config.RemoteWriteProtoMsgV1 { - walMetadata = true - } + walMetadata := t.protoMsg != config.RemoteWriteProtoMsgV1 + t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata) // The current MetadataWatcher implementation is mutually exclusive diff --git a/storage/remote/read.go b/storage/remote/read.go index 2ec48784dc..881b5c28d1 100644 --- a/storage/remote/read.go +++ b/storage/remote/read.go @@ -93,7 +93,7 @@ func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage. noop bool err error ) - cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt) + cq.maxt, noop, err = c.preferLocalStorage(mint, maxt) if err != nil { return nil, err } diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index fd7f3ad48d..6b6275edac 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -277,7 +277,7 @@ func TestStreamReadEndpoint(t *testing.T) { require.Equal(t, 2, recorder.Code/100) require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type")) - require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding")) + require.Empty(t, recorder.Result().Header.Get("Content-Encoding")) var results []*prompb.ChunkedReadResponse stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil) diff --git a/storage/series_test.go b/storage/series_test.go index 5309494069..1ade558648 100644 --- a/storage/series_test.go +++ b/storage/series_test.go @@ -112,7 +112,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) { require.Len(t, ssSlice, 2) var iter chunkenc.Iterator for i, s := range ssSlice { - require.EqualValues(t, series[i].lbs, s.Labels()) + require.Equal(t, series[i].lbs, s.Labels()) iter = s.Iterator(iter) j := 0 for iter.Next() == chunkenc.ValFloat { @@ -597,15 +597,15 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { } series := NewListSeries(lbs, copiedSamples) encoder := NewSeriesToChunkEncoder(series) - require.EqualValues(t, lbs, encoder.Labels()) + require.Equal(t, lbs, encoder.Labels()) chks, err := ExpandChunks(encoder.Iterator(nil)) require.NoError(t, err) - require.Equal(t, len(test.expectedCounterResetHeaders), len(chks)) + require.Len(t, chks, len(test.expectedCounterResetHeaders)) // Decode all encoded samples and assert they are equal to the original ones. 
encodedSamples := chunks.ChunkMetasToSamples(chks) - require.Equal(t, len(test.expectedSamples), len(encodedSamples)) + require.Len(t, encodedSamples, len(test.expectedSamples)) for i, s := range test.expectedSamples { encodedSample := encodedSamples[i] diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index db98e87408..0cd780677e 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -1305,7 +1305,7 @@ func TestDBCreatedTimestampSamplesIngestion(t *testing.T) { outputSamples := readWALSamples(t, s.wal.Dir()) - require.Equal(t, len(tc.expectedSamples), len(outputSamples), "Expected %d samples", len(tc.expectedSamples)) + require.Len(t, outputSamples, len(tc.expectedSamples), "Expected %d samples", len(tc.expectedSamples)) for i, expectedSample := range tc.expectedSamples { for _, sample := range outputSamples { diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index 4c0b27e82f..68742471e6 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -129,7 +129,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { // Checking on-disk bytes for the first file. require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles)) - require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers)) + require.Len(t, hrw.closers, len(hrw.mmappedChunkFiles)) actualBytes, err := os.ReadFile(firstFileName) require.NoError(t, err) @@ -208,9 +208,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) { files, err := os.ReadDir(hrw.dir.Name()) require.NoError(t, err) - require.Equal(t, len(remainingFiles), len(files), "files on disk") - require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles") - require.Equal(t, len(remainingFiles), len(hrw.closers), "closers") + require.Len(t, files, len(remainingFiles), "files on disk") + require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles") + require.Len(t, hrw.closers, len(remainingFiles), "closers") for _, i := range remainingFiles { _, ok := hrw.mmappedChunkFiles[i] @@ -325,9 +325,9 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) { files, err := os.ReadDir(hrw.dir.Name()) require.NoError(t, err) - require.Equal(t, len(remainingFiles), len(files), "files on disk") - require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles") - require.Equal(t, len(remainingFiles), len(hrw.closers), "closers") + require.Len(t, files, len(remainingFiles), "files on disk") + require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles") + require.Len(t, hrw.closers, len(remainingFiles), "closers") for _, i := range remainingFiles { _, ok := hrw.mmappedChunkFiles[i] diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index 4b10a42ef7..655cfa408b 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1399,7 +1399,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) { createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime)) } require.NoError(t, db.reload()) - require.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks") + require.Len(t, db.Blocks(), len(blocks), "unexpected block count after a reloadBlocks") return len(blocks) }, diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 6270220be4..b5ce5f20fd 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1347,7 +1347,7 @@ func TestTombstoneCleanFail(t *testing.T) { actualBlockDirs, err := 
blockDirs(db.dir) require.NoError(t, err) // Only one block should have been replaced by a new block. - require.Equal(t, len(oldBlockDirs), len(actualBlockDirs)) + require.Len(t, actualBlockDirs, len(oldBlockDirs)) require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1) } @@ -1535,7 +1535,7 @@ func TestSizeRetention(t *testing.T) { // Test that registered size matches the actual disk size. require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size. - require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered. + require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered. blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics. walSize, err := db.Head().wal.Size() require.NoError(t, err) @@ -2052,7 +2052,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.NoError(t, db.Compact(ctx)) actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), len(actBlocks)) + require.Len(t, actBlocks, len(db.Blocks())) require.Empty(t, actBlocks) require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here") }) @@ -2072,7 +2072,7 @@ func TestNoEmptyBlocks(t *testing.T) { actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), len(actBlocks)) + require.Len(t, actBlocks, len(db.Blocks())) require.Empty(t, actBlocks) app = db.Appender(ctx) @@ -2093,7 +2093,7 @@ func TestNoEmptyBlocks(t *testing.T) { require.Equal(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here") actBlocks, err = blockDirs(db.Dir()) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), len(actBlocks)) + require.Len(t, actBlocks, len(db.Blocks())) require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples") }) @@ -2134,7 +2134,7 @@ func TestNoEmptyBlocks(t *testing.T) { actBlocks, err := blockDirs(db.Dir()) require.NoError(t, err) - require.Equal(t, len(db.Blocks()), len(actBlocks)) + require.Len(t, actBlocks, len(db.Blocks())) require.Len(t, actBlocks, 1, "All samples are deleted. 
Only the most recent block should remain after compaction.") }) } @@ -2450,7 +2450,7 @@ func TestDBReadOnly(t *testing.T) { t.Run("blocks", func(t *testing.T) { blocks, err := dbReadOnly.Blocks() require.NoError(t, err) - require.Equal(t, len(expBlocks), len(blocks)) + require.Len(t, blocks, len(expBlocks)) for i, expBlock := range expBlocks { require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch") } @@ -2478,7 +2478,7 @@ func TestDBReadOnly(t *testing.T) { readOnlySeries := query(t, q, matchAll) readOnlyDBHash := testutil.DirHash(t, dbDir) - require.Equal(t, len(expSeries), len(readOnlySeries), "total series mismatch") + require.Len(t, readOnlySeries, len(expSeries), "total series mismatch") require.Equal(t, expSeries, readOnlySeries, "series mismatch") require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same") }) @@ -2488,7 +2488,7 @@ func TestDBReadOnly(t *testing.T) { readOnlySeries := queryAndExpandChunks(t, cq, matchAll) readOnlyDBHash := testutil.DirHash(t, dbDir) - require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch") + require.Len(t, readOnlySeries, len(expChunks), "total series mismatch") require.Equal(t, expChunks, readOnlySeries, "series chunks mismatch") require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same") }) @@ -8260,7 +8260,7 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) { require.NoError(t, db.Compact(ctx)) verifyBlockRanges := func() { blocks := db.Blocks() - require.Equal(t, len(c.blockRanges), len(blocks)) + require.Len(t, blocks, len(c.blockRanges)) for j, br := range c.blockRanges { require.Equal(t, br[0]*time.Minute.Milliseconds(), blocks[j].MinTime()) require.Equal(t, br[1]*time.Minute.Milliseconds(), blocks[j].MaxTime()) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index dcf9c9c9aa..561c8c789d 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -4666,7 +4666,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) { } // We cannot compare StaleNAN with require.Equal, hence checking each histogram manually. - require.Equal(t, len(expHistograms), len(actHistograms)) + require.Len(t, actHistograms, len(expHistograms)) actNumStale := 0 for i, eh := range expHistograms { ah := actHistograms[i] @@ -5304,7 +5304,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { // Verify the snapshot. 
name, idx, offset, err := LastChunkSnapshot(dir) require.NoError(t, err) - require.NotEqual(t, "", name) + require.NotEmpty(t, name) require.Equal(t, 0, idx) require.Positive(t, offset) } diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index ee186c1d95..e3fe5a41fd 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -424,7 +424,7 @@ func TestPersistence_index_e2e(t *testing.T) { res, err := ir.SortedLabelValues(ctx, k) require.NoError(t, err) - require.Equal(t, len(v), len(res)) + require.Len(t, res, len(v)) for i := 0; i < len(v); i++ { require.Equal(t, v[i], res[i]) } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index d49c7d8fc3..9dcf125b92 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -860,7 +860,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) require.NoError(t, err) - require.Equal(t, len(tc.expChunksSamples), len(chks)) + require.Len(t, chks, len(tc.expChunksSamples)) cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0) defer cr.Close() @@ -1030,7 +1030,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) require.NoError(t, err) - require.Equal(t, len(tc.expChunksSamples), len(chks)) + require.Len(t, chks, len(tc.expChunksSamples)) // Now we keep receiving ooo samples // OOO few samples for s1. diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index 07467479dc..2d5901a13b 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -246,7 +246,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { // Sanity check. - require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets") + require.Len(t, tc.expectedCounterResets, len(tc.samples), "number of samples and counter resets") oooChunk := OOOChunk{} for _, s := range tc.samples { @@ -264,7 +264,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) { chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64) require.NoError(t, err) - require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks") + require.Len(t, chunks, len(tc.expectedChunks), "number of chunks") sampleIndex := 0 for i, c := range chunks { require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i) diff --git a/tsdb/querier.go b/tsdb/querier.go index 5d9801f2b8..f7d564a2dd 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -525,7 +525,7 @@ func (b *blockBaseSeriesSet) Next() bool { // Count those in range to size allocation (roughly - ignoring tombstones). 
nChks := 0 for _, chk := range b.bufChks { - if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) { + if chk.MaxTime >= b.mint && chk.MinTime <= b.maxt { nChks++ } } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index f55d57cc79..cb96fa3716 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -263,7 +263,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C rmChunkRefs(chksRes) require.Equal(t, errExp, errRes) - require.Equal(t, len(chksExp), len(chksRes)) + require.Len(t, chksRes, len(chksExp)) var exp, act [][]chunks.Sample for i := range chksExp { samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil) diff --git a/tsdb/testutil.go b/tsdb/testutil.go index 4dac8c29ff..4d413322c8 100644 --- a/tsdb/testutil.go +++ b/tsdb/testutil.go @@ -174,7 +174,7 @@ func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sa } } - require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name) + require.Len(t, actual, len(expected), "Length not equal to expected for %s", name) for i, s := range expected { expectedSample := s actualSample := actual[i] diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 95783957a7..a8e1d9f900 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -125,12 +125,13 @@ func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) { return } -//nolint:revive // error-naming. +//nolint:staticcheck,revive // error-naming. var ( // Currently there are only 2 types, warnings and info. // For now, info are visually identical with warnings as we have not updated // the API spec or the frontend to show a different kind of warning. But we // make the distinction here to prepare for adding them in future. 
+ PromQLInfo = errors.New("PromQL info") PromQLWarning = errors.New("PromQL warning") diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index 6964952af4..f324afa3d6 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -50,7 +50,7 @@ func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error { return err } - return writeSubcommands(writer, 1, model.Name, model.CmdGroupModel.Commands) + return writeSubcommands(writer, 1, model.Name, model.Commands) } func header(title, help string) []byte { @@ -172,13 +172,13 @@ func writeTable(writer io.Writer, data [][]string, header string) error { buf := bytes.NewBuffer(nil) - buf.WriteString(fmt.Sprintf("\n\n%s\n\n", header)) + fmt.Fprintf(buf, "\n\n%s\n\n", header) columnsToRender := determineColumnsToRender(data) headers := data[0] buf.WriteString("|") for _, j := range columnsToRender { - buf.WriteString(fmt.Sprintf(" %s |", headers[j])) + fmt.Fprintf(buf, " %s |", headers[j]) } buf.WriteString("\n") @@ -192,7 +192,7 @@ func writeTable(writer io.Writer, data [][]string, header string) error { row := data[i] buf.WriteString("|") for _, j := range columnsToRender { - buf.WriteString(fmt.Sprintf(" %s |", row[j])) + fmt.Fprintf(buf, " %s |", row[j]) } buf.WriteString("\n") } @@ -243,7 +243,7 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands [] help = cmd.HelpLong } help = formatHyphenatedWords(help) - if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil { + if _, err := fmt.Fprintf(writer, "\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help); err != nil { return err } @@ -255,8 +255,8 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands [] return err } - if cmd.CmdGroupModel != nil && len(cmd.CmdGroupModel.Commands) > 0 { - if err := writeSubcommands(writer, level+1, modelName, cmd.CmdGroupModel.Commands); err != nil { + if cmd.CmdGroupModel != nil && len(cmd.Commands) > 0 { + if err := writeSubcommands(writer, level+1, modelName, cmd.Commands); err != nil { return err } } diff --git a/util/stats/query_stats.go b/util/stats/query_stats.go index e83a6015c7..b1c91a69fd 100644 --- a/util/stats/query_stats.go +++ b/util/stats/query_stats.go @@ -134,7 +134,7 @@ func NewQueryStats(s *Statistics) QueryStats { sp = s.Samples ) - for s, timer := range tg.TimerGroup.timers { + for s, timer := range tg.timers { switch s { case EvalTotalTime: qt.EvalTotalTime = timer.Duration() @@ -328,5 +328,5 @@ func (qs *QuerySamples) NewChild() *QuerySamples { } func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) { - return NewSpanTimer(ctx, qt.SpanOperation(), qs.TimerGroup.GetTimer(qt), observers...) + return NewSpanTimer(ctx, qt.SpanOperation(), qs.GetTimer(qt), observers...) 
} diff --git a/util/strutil/strconv.go b/util/strutil/strconv.go index 8cdd7d4830..88d2a3b610 100644 --- a/util/strutil/strconv.go +++ b/util/strutil/strconv.go @@ -54,10 +54,10 @@ func SanitizeFullLabelName(name string) string { } var validSb strings.Builder for i, b := range name { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - validSb.WriteRune('_') - } else { + if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0) { validSb.WriteRune(b) + } else { + validSb.WriteRune('_') } } return validSb.String() diff --git a/web/web.go b/web/web.go index 84c4a2a529..601d42cbea 100644 --- a/web/web.go +++ b/web/web.go @@ -812,7 +812,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) { hostname, err := os.Hostname() if err != nil { - return status, fmt.Errorf("Error getting hostname: %w", err) + return status, fmt.Errorf("error getting hostname: %w", err) } status.Hostname = hostname status.ServerTime = time.Now().UTC() diff --git a/web/web_test.go b/web/web_test.go index 696ba80d1d..ea7e099041 100644 --- a/web/web_test.go +++ b/web/web_test.go @@ -624,7 +624,7 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) { b, err := io.ReadAll(resp.Body) require.NoError(t, err) require.NoError(t, json.Unmarshal(b, snapshot)) - require.NotZero(t, snapshot.Data.Name, "snapshot directory not returned") + require.NotEmpty(t, snapshot.Data.Name, "snapshot directory not returned") require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name))) require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots"))) } From 4685756c8b90463ce636bf78fc77e5bf4a03a700 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 May 2025 08:34:28 +0000 Subject: [PATCH 18/39] chore(deps): bump github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor Bumps [github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib) from 0.124.1 to 0.125.0. - [Release notes](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.124.1...v0.125.0) --- updated-dependencies: - dependency-name: github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor dependency-version: 0.125.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 14 +++++++------- go.sum | 52 ++++++++++++++++++++++++++-------------------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index c463064707..be26346915 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 github.com/oklog/run v1.1.0 github.com/oklog/ulid/v2 v2.1.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.125.0 github.com/ovh/go-ovh v1.7.0 github.com/prometheus/alertmanager v0.28.1 github.com/prometheus/client_golang v1.22.0 @@ -162,8 +162,8 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/knadh/koanf/maps v0.1.2 // indirect - github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.1.2 // indirect + github.com/knadh/koanf/providers/confmap v1.0.0 // indirect + github.com/knadh/koanf/v2 v2.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -180,8 +180,8 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.125.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.125.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -197,8 +197,8 @@ require ( github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/confmap v1.30.0 // indirect - go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 // indirect + go.opentelemetry.io/collector/confmap v1.31.0 // indirect + go.opentelemetry.io/collector/confmap/xconfmap v0.125.0 // indirect go.opentelemetry.io/collector/pipeline v0.125.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/zap v1.27.0 // indirect diff --git a/go.sum b/go.sum index 88246f2b06..730f848832 100644 --- a/go.sum +++ b/go.sum @@ -280,10 +280,10 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= -github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= -github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= +github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE= 
+github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A= +github.com/knadh/koanf/v2 v2.2.0 h1:FZFwd9bUjpb8DyCWARUBy5ovuhDs1lI87dOEn2K8UVU= +github.com/knadh/koanf/v2 v2.2.0/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -373,14 +373,14 @@ github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1 h1:jOG1ceAx+IATloKXHsE2Cy88XTgqPB/hiXicOrxENx8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.124.1/go.mod h1:mtNCoy09iO1f2zy5bEqkyRfRPaNKea57yK63cfHixts= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1 h1:G2daAIXiQhAwQSz9RK71QsBH9rmH/m/vdkFuGIEPfS4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.124.1/go.mod h1:/WAA1PKvHNz7E5SrtGg2KfAWl/PrmS0FVYOanoGxk0I= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1 h1:mMVzpkpy6rKL1Q/xXNogZVtWebIlxTRzhsgp3b9ioCM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.124.1/go.mod h1:jM8Gsd0fIiwRzWrzd7Gm6PZYi5AgHPRkz0625Rtqyxo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1 h1:gmmzhgewk2fU0Md0vmaDEFgfRycfCfjgPvMA4SEdKiU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.124.1/go.mod h1:AsQJBuUUY1/yqK2c87hv4deeteaKwktwLIfQCN2OGk4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.125.0 h1:xNTSTF+Z0Vn3Nt2aUJ5JrJUUsrDA4l+oy2hVIGhPHxc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.125.0/go.mod h1:wBz+TYCFKo0gZtIxORKtTKaUZqTJFTZh/bkyQ9tUqMg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.125.0 h1:IzhLlqlwxWM0PcGeyq6ispujXRTyzeA37LNtcQHOvdg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.125.0/go.mod h1:/WDZg8/Uk2niDeFWkijYvWkQ9gaRF0Vkj/RxGDRcMEY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.125.0 h1:ZzDmvZcWi59c4gZLlkV+NbzDseuFNPePhgZ8XoZqfAI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.125.0/go.mod h1:Hulx7f7AcWKM7crzT0HKxubNqN4qMF8wGyrC3W0BIYc= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.125.0 h1:nuXuleB2L/E8nctDbyRWKGv3DlAggzc4mtnQKf291PY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.125.0/go.mod h1:AFs92cGgB/uaKbX48kuI7eawXr6eG93sCMvaCV5a/yw= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -490,14 +490,14 @@ go.opentelemetry.io/auto/sdk 
v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJyS go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/collector/component v1.31.0 h1:9LzU8X1RhV3h8/QsAoTX23aFUfoJ3EUc9O/vK+hFpSI= go.opentelemetry.io/collector/component v1.31.0/go.mod h1:JbZl/KywXJxpUXPbt96qlEXJSym1zQ2hauMxYMuvlxM= -go.opentelemetry.io/collector/component/componentstatus v0.124.0 h1:0WHaANNktxLIk+lN+CtgPBESI1MJBrfVW/LvNCbnMQ4= -go.opentelemetry.io/collector/component/componentstatus v0.124.0/go.mod h1:a/wa8nxJGWOGuLwCN8gHCzFHCaUVZ+VyUYuKz9Yaq38= -go.opentelemetry.io/collector/component/componenttest v0.124.0 h1:Wsc+DmDrWTFs/aEyjDA3slNwV+h/0NOyIR5Aywvr6Zw= -go.opentelemetry.io/collector/component/componenttest v0.124.0/go.mod h1:NQ4ATOzMFc7QA06B993tq8o27DR0cu/JR/zK7slGJ3E= -go.opentelemetry.io/collector/confmap v1.30.0 h1:Y0MXhjQCdMyJN9xZMWWdNPWs6ncMVf7YVnyAEN2dAcM= -go.opentelemetry.io/collector/confmap v1.30.0/go.mod h1:9DdThVDIC3VsdtTb7DgT+HwusWOocoqDkd/TErEtQgA= -go.opentelemetry.io/collector/confmap/xconfmap v0.124.0 h1:PK+CaSgjLvzHaafBieJ3AjiUTAPuf40C+/Fn38LvmW8= -go.opentelemetry.io/collector/confmap/xconfmap v0.124.0/go.mod h1:DZmFSgWiqXQrzld9uU+73YAVI5JRIgd8RkK5HcaXGU0= +go.opentelemetry.io/collector/component/componentstatus v0.125.0 h1:zlxGQZYd9kknRZSjRpOYW5SBjl0a5zYFYRPbreobXoU= +go.opentelemetry.io/collector/component/componentstatus v0.125.0/go.mod h1:bHXc2W8bqqo9adOvCgvhcO7pYzJOSpyV4cuQ1wiIl04= +go.opentelemetry.io/collector/component/componenttest v0.125.0 h1:E2mpnMQbkMpYoZ3Q8pHx4kod7kedjwRs1xqDpzCe/84= +go.opentelemetry.io/collector/component/componenttest v0.125.0/go.mod h1:pQtsE1u/SPZdTphP5BZP64XbjXSq6wc+mDut5Ws/JDI= +go.opentelemetry.io/collector/confmap v1.31.0 h1:+AW5VJc1rCtgEyGd+1J5uSNw/kVZ98+lKO/pqXEwVvU= +go.opentelemetry.io/collector/confmap v1.31.0/go.mod h1:TdutQlIoHDPXcZ2xZ0QWGRkSFC8oTKO61zTx569dvrY= +go.opentelemetry.io/collector/confmap/xconfmap v0.125.0 h1:Y0LPtz+xgtRYVAk2gZmvnBROEJj8C3YDiFPj5URbsX8= +go.opentelemetry.io/collector/confmap/xconfmap v0.125.0/go.mod h1:8hNqCMs9Gzahh4W1h5XWOrQ+bE6NfP13WAggNyExJJs= go.opentelemetry.io/collector/consumer v1.31.0 h1:L+y66ywxLHnAxnUxv0JDwUf5bFj53kMxCCyEfRKlM7s= go.opentelemetry.io/collector/consumer v1.31.0/go.mod h1:rPsqy5ni+c6xNMUkOChleZYO/nInVY6eaBNZ1FmWJVk= go.opentelemetry.io/collector/consumer/consumertest v0.125.0 h1:TUkxomGS4DAtjBvcWQd2UY4FDLLEKMQD6iOIDUr/5dM= @@ -512,16 +512,16 @@ go.opentelemetry.io/collector/pdata v1.31.0 h1:P5WuLr1l2JcIvr6Dw2hl01ltp2ZafPnC4 go.opentelemetry.io/collector/pdata v1.31.0/go.mod h1:m41io9nWpy7aCm/uD1L9QcKiZwOP0ldj83JEA34dmlk= go.opentelemetry.io/collector/pdata/pprofile v0.125.0 h1:Qqlx8w1HpiYZ9RQqjmMQIysI0cHNO1nh3E/fCTeFysA= go.opentelemetry.io/collector/pdata/pprofile v0.125.0/go.mod h1:p/yK023VxAp8hm27/1G5DPTcMIpnJy3cHGAFUQZGyaQ= -go.opentelemetry.io/collector/pdata/testdata v0.124.0 h1:vY+pWG7CQfzzGSB5+zGYHQOltRQr59Ek9QiPe+rI+NY= -go.opentelemetry.io/collector/pdata/testdata v0.124.0/go.mod h1:lNH48lGhGv4CYk27fJecpsR1zYHmZjKgNrAprwjym0o= +go.opentelemetry.io/collector/pdata/testdata v0.125.0 h1:due1Hl0EEVRVwfCkiamRy5E8lS6yalv0lo8Zl/SJtGw= +go.opentelemetry.io/collector/pdata/testdata v0.125.0/go.mod h1:1GpEWlgdMrd+fWsBk37ZC2YmOP5YU3gFQ4rWuCu9g24= go.opentelemetry.io/collector/pipeline v0.125.0 h1:oitBgcAFqntDB4ihQJUHJSQ8IHqKFpPkaTVbTYdIUzM= go.opentelemetry.io/collector/pipeline v0.125.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= go.opentelemetry.io/collector/processor v1.31.0 h1:+u7sBUpnCBsHYoALp4hfr9VEjLHHYa4uKENGITe0K9Q= 
go.opentelemetry.io/collector/processor v1.31.0/go.mod h1:5hDYJ7/hTdfd2tF2Rj5Hs6+mfyFz2O7CaPzVvW1qHQc= -go.opentelemetry.io/collector/processor/processortest v0.124.0 h1:qcyo0dSWmgpNFxjObsKk3Rd/wWV8CkMevd+jApkTQWE= -go.opentelemetry.io/collector/processor/processortest v0.124.0/go.mod h1:1YDTxd4c/uVU3Ui1+AzvYW94mo5DbhNmB1xSof6zvD0= -go.opentelemetry.io/collector/processor/xprocessor v0.124.0 h1:KAe8gIje8TcB8varZ4PDy0HV5xX5rNdaQ7q46BE915w= -go.opentelemetry.io/collector/processor/xprocessor v0.124.0/go.mod h1:ItJBBlR6/141vg1v4iRrcsBrGjPCgmXAztxS2x2YkdI= +go.opentelemetry.io/collector/processor/processortest v0.125.0 h1:ZVAN4iZPDcWhpzKqnuok2NIuS5hwGVVQUOWkJFR12tA= +go.opentelemetry.io/collector/processor/processortest v0.125.0/go.mod h1:VAw0IRG35cWTBjBtreXeXJEgqkRegfjrH/EuLhNX2+I= +go.opentelemetry.io/collector/processor/xprocessor v0.125.0 h1:VWYPMW1VmDq6xB7M5SYjBpQCCIq3MhQ3W++wU47QpZM= +go.opentelemetry.io/collector/processor/xprocessor v0.125.0/go.mod h1:bCxUyFVlksANg8wjYZqWVsRB33lkLQ294rTrju/IZiM= go.opentelemetry.io/collector/semconv v0.125.0 h1:SyRP617YGvNSWRSKMy7Lbk9RaJSR+qFAAfyxJOeZe4s= go.opentelemetry.io/collector/semconv v0.125.0/go.mod h1:te6VQ4zZJO5Lp8dM2XIhDxDiL45mwX0YAQQWRQ0Qr9U= go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpoRF40MlwNCA+Oo93kXU= From 9d7a37ae1866f9642ccfea5486c520e7a56e5f2c Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Sun, 4 May 2025 11:17:22 +0200 Subject: [PATCH 19/39] Bump golangci-lint to v2.1.5 (#16545) Signed-off-by: Matthieu MOREL --- .github/workflows/ci.yml | 2 +- .golangci.yml | 10 +--------- Makefile.common | 2 +- scripts/golangci-lint.yml | 2 +- 4 files changed, 4 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cbbef8b699..fadf67ade0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -206,7 +206,7 @@ jobs: with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. - version: v2.0.2 + version: v2.1.5 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' diff --git a/.golangci.yml b/.golangci.yml index 37b488f812..4616feab55 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -10,10 +10,6 @@ formatters: - prefix(github.com/prometheus/prometheus) gofumpt: extra-rules: true - exclusions: - paths: - # Skip autogenerated files. - - ^.*\.(pb|y)\.go$ issues: max-issues-per-linter: 0 @@ -72,9 +68,7 @@ linters: - linters: - godot source: "^// ===" - - linters: - - perfsprint - text: "fmt.Sprintf can be replaced with string concatenation" + warn-unused: true settings: depguard: rules: @@ -168,8 +162,6 @@ linters: disable: - float-compare - go-require - - len # FIXME - - useless-assert # FIXME: wait for golangci-lint > v2.0.2 enable-all: true output: diff --git a/Makefile.common b/Makefile.common index 81bad5f42d..d8b7989096 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v2.0.2 +GOLANGCI_LINT_VERSION ?= v2.1.5 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. 
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 3893ef86b1..672dd424d6 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -36,4 +36,4 @@ jobs: uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 with: args: --verbose - version: v2.0.2 + version: v2.1.5 From c3ce1f1927483364cae2503ba8c1a2e9693d721e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 May 2025 11:50:04 +0200 Subject: [PATCH 20/39] chore(deps): bump actions/setup-node from 4.3.0 to 4.4.0 (#16533) Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.3.0 to 4.4.0. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/cdca7365b2dadb8aad0a33bc7601856ffabcc48e...49933ea5288caeca8642d1e84afbd3f7d6820020) --- updated-dependencies: - dependency-name: actions/setup-node dependency-version: 4.4.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fadf67ade0..ad4eea7f3d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -254,7 +254,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: prometheus/promci@443c7fc2397e946bc9f5029e313a9c3441b9b86d # v0.4.7 - name: Install nodejs - uses: actions/setup-node@cdca7365b2dadb8aad0a33bc7601856ffabcc48e # v4.3.0 + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" From 2c0f02a7022f6105efdd71c49da8c1ca6c5c663c Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 5 May 2025 16:23:08 +1000 Subject: [PATCH 21/39] promql: don't emit a value from `histogram_fraction` or `histogram_quantile` if classic and native histograms are present at the same timestamp Signed-off-by: Charles Korn --- promql/engine.go | 3 ++- promql/functions.go | 8 ++++++++ promql/promqltest/testdata/histograms.test | 22 ++++++++++++++++++++++ 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/promql/engine.go b/promql/engine.go index d5a192f8ba..a2738fdc1e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1202,7 +1202,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota mb.buckets = append(mb.buckets, Bucket{upperBound, sample.F}) } - for _, sample := range enh.nativeHistogramSamples { + for idx, sample := range enh.nativeHistogramSamples { // We have to reconstruct the exact same signature as above for // a classic histogram, just ignoring any le label. enh.lblBuf = sample.Metric.Bytes(enh.lblBuf) @@ -1212,6 +1212,7 @@ func (enh *EvalNodeHelper) resetHistograms(inVec Vector, arg parser.Expr) annota // labels. Do not evaluate anything. 
annos.Add(annotations.NewMixedClassicNativeHistogramsWarning(sample.Metric.Get(labels.MetricName), arg.PositionRange())) delete(enh.signatureToMetricWithBuckets, string(enh.lblBuf)) + enh.nativeHistogramSamples[idx].H = nil continue } } diff --git a/promql/functions.go b/promql/functions.go index 0662c8d451..41526fcb16 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -1404,6 +1404,10 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev // Deal with the native histograms. for _, sample := range enh.nativeHistogramSamples { + if sample.H == nil { + // Native histogram conflicts with classic histogram at the same timestamp, ignore. + continue + } if !enh.enableDelayedNameRemoval { sample.Metric = sample.Metric.DropMetricName() } @@ -1446,6 +1450,10 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev // Deal with the native histograms. for _, sample := range enh.nativeHistogramSamples { + if sample.H == nil { + // Native histogram conflicts with classic histogram at the same timestamp, ignore. + continue + } if !enh.enableDelayedNameRemoval { sample.Metric = sample.Metric.DropMetricName() } diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 45492d89f3..68f646dd9a 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -584,3 +584,25 @@ eval instant at 10m histogram_count(increase(histogram_with_reset[15m])) eval instant at 10m histogram_sum(increase(histogram_with_reset[15m])) {} 91.5 + +clear + +# Test histogram_quantile and histogram_fraction with conflicting classic and native histograms. +load 1m + series{host="a"} {{schema:0 sum:5 count:4 buckets:[9 2 1]}} + series{host="a", le="0.1"} 2 + series{host="a", le="1"} 3 + series{host="a", le="10"} 5 + series{host="a", le="100"} 6 + series{host="a", le="1000"} 8 + series{host="a", le="+Inf"} 9 + +eval instant at 0 histogram_quantile(0.8, series) + expect no_info + expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series" + # Should return no results. + +eval instant at 0 histogram_fraction(-Inf, 1, series) + expect no_info + expect warn msg: PromQL warning: vector contains a mix of classic and native histograms for metric name "series" + # Should return no results. 
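As a rough illustration of the behaviour introduced by the patch above (a hypothetical, self-contained Go sketch, not the code from this patch): classic histogram buckets are grouped by their label set with the "le" label dropped, and a native histogram whose remaining labels match an existing group is treated as a conflict and skipped, analogous to setting H = nil in resetHistograms and checking for it in funcHistogramQuantile/funcHistogramFraction. All identifiers below (signature, classicGroups, the sample label maps) are illustrative only.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// signature builds a stable key from a label set, ignoring the "le" bucket label,
// so classic histogram buckets and a native histogram of the same series collide.
func signature(lbls map[string]string) string {
	keys := make([]string, 0, len(lbls))
	for k := range lbls {
		if k == "le" {
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var sb strings.Builder
	for _, k := range keys {
		sb.WriteString(k)
		sb.WriteByte('=')
		sb.WriteString(lbls[k])
		sb.WriteByte(';')
	}
	return sb.String()
}

func main() {
	// Mirrors the testdata above: series{host="a", le="0.1"} alongside a native
	// histogram series{host="a"} at the same timestamp.
	classicBucket := map[string]string{"__name__": "series", "host": "a", "le": "0.1"}
	native := map[string]string{"__name__": "series", "host": "a"}

	classicGroups := map[string]bool{signature(classicBucket): true}

	if classicGroups[signature(native)] {
		// Conflict: both a classic and a native histogram exist for this series,
		// so no quantile/fraction value is emitted for it.
		fmt.Println("mixed classic and native histograms: skipping sample")
	}
}

Skipping the conflicting sample, rather than preferring one representation, avoids returning a quantile or fraction computed from whichever sample happened to be processed last; the histograms.test cases above assert that such queries return no result and only the "vector contains a mix of classic and native histograms" warning.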
From a1c157aaefeec952a85c0fa14fdcd518c55b8d9d Mon Sep 17 00:00:00 2001 From: chardch Date: Thu, 17 Apr 2025 13:09:11 -0700 Subject: [PATCH 22/39] Add global config option for always_scrape_classic_histograms Addresses https://github.com/prometheus/prometheus/issues/16371 This will help with migrating to native histograms with `convert_classic_histograms_to_nhcb` since users may still need to keep the classic histograms during a migration Signed-off-by: chardch --- config/config.go | 33 +++++-- config/config_test.go | 91 ++++++++++++++++--- ...isable_always_scrape_classic_hist.good.yml | 6 ++ ...enable_always_scrape_classic_hist.good.yml | 6 ++ ...isable_always_scrape_classic_hist.good.yml | 7 ++ ...enable_always_scrape_classic_hist.good.yml | 7 ++ docs/configuration/configuration.md | 8 +- scrape/scrape.go | 4 +- scrape/scrape_test.go | 18 ++-- 9 files changed, 145 insertions(+), 35 deletions(-) create mode 100644 config/testdata/global_disable_always_scrape_classic_hist.good.yml create mode 100644 config/testdata/global_enable_always_scrape_classic_hist.good.yml create mode 100644 config/testdata/local_disable_always_scrape_classic_hist.good.yml create mode 100644 config/testdata/local_enable_always_scrape_classic_hist.good.yml diff --git a/config/config.go b/config/config.go index 09c79b3501..1ef638bb88 100644 --- a/config/config.go +++ b/config/config.go @@ -169,6 +169,7 @@ var ( // changes to DefaultNativeHistogramScrapeProtocols. ScrapeProtocols: DefaultScrapeProtocols, ConvertClassicHistogramsToNHCB: false, + AlwaysScrapeClassicHistograms: false, } DefaultRuntimeConfig = RuntimeConfig{ @@ -178,14 +179,13 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. - AlwaysScrapeClassicHistograms: false, - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - EnableCompression: true, + // ScrapeTimeout, ScrapeInterval, ScrapeProtocols, AlwaysScrapeClassicHistograms, and ConvertClassicHistogramsToNHCB default to the configured globals. + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -489,6 +489,8 @@ type GlobalConfig struct { MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"` // Whether to convert all scraped classic histograms into native histograms with custom buckets. ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` + // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. + AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -645,7 +647,8 @@ func (c *GlobalConfig) isZero() bool { c.QueryLogFile == "" && c.ScrapeFailureLogFile == "" && c.ScrapeProtocols == nil && - !c.ConvertClassicHistogramsToNHCB + !c.ConvertClassicHistogramsToNHCB && + !c.AlwaysScrapeClassicHistograms } // RuntimeConfig configures the values for the process behavior. @@ -690,7 +693,7 @@ type ScrapeConfig struct { // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. 
ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"` // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. - AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` + AlwaysScrapeClassicHistograms *bool `yaml:"always_scrape_classic_histograms,omitempty"` // Whether to convert all scraped classic histograms into a native histogram with custom buckets. ConvertClassicHistogramsToNHCB *bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` // File to which scrape failures are logged. @@ -904,6 +907,11 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { c.ConvertClassicHistogramsToNHCB = &global } + if c.AlwaysScrapeClassicHistograms == nil { + global := globalConfig.AlwaysScrapeClassicHistograms + c.AlwaysScrapeClassicHistograms = &global + } + return nil } @@ -931,6 +939,11 @@ func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool { return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB } +// AlwaysScrapeClassicHistogramsEnabled returns whether to always scrape classic histograms. +func (c *ScrapeConfig) AlwaysScrapeClassicHistogramsEnabled() bool { + return c.AlwaysScrapeClassicHistograms != nil && *c.AlwaysScrapeClassicHistograms +} + // StorageConfig configures runtime reloadable configuration options. type StorageConfig struct { TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"` diff --git a/config/config_test.go b/config/config_test.go index 6d59c7220d..379c171fd0 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -102,6 +102,7 @@ var expectedConf = &Config{ LabelNameLengthLimit: globLabelNameLengthLimit, LabelValueLengthLimit: globLabelValueLengthLimit, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, + AlwaysScrapeClassicHistograms: false, ConvertClassicHistogramsToNHCB: false, }, @@ -223,6 +224,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: "testdata/fail_prom.log", MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -339,6 +341,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.HTTPClientConfig{ @@ -440,6 +443,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -499,6 +503,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: "/metrics", @@ -536,6 +541,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -579,6 +585,7 @@ var 
expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -622,6 +629,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -655,6 +663,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -696,6 +705,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -734,6 +744,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -779,6 +790,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -814,6 +826,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -852,6 +865,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -883,6 +897,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -917,6 +932,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: "/federate", @@ -951,6 +967,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: 
boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -985,6 +1002,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1016,6 +1034,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1055,6 +1074,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1093,6 +1113,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1128,6 +1149,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1162,6 +1184,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1200,6 +1223,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1241,6 +1265,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1301,6 +1326,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1332,6 +1358,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, @@ -1374,6 +1401,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, 
MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, @@ -1422,6 +1450,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1461,6 +1490,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.DefaultHTTPClientConfig, @@ -1495,6 +1525,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -1531,6 +1562,7 @@ var expectedConf = &Config{ ScrapeFailureLogFile: globScrapeFailureLogFile, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -2353,13 +2385,23 @@ func TestEmptyGlobalBlock(t *testing.T) { require.Equal(t, exp, *c) } +// ScrapeConfigOptions contains options for creating a scrape config. +type ScrapeConfigOptions struct { + JobName string + ScrapeInterval model.Duration + ScrapeTimeout model.Duration + AlwaysScrapeClassicHistograms bool + ConvertClassicHistToNHCB bool +} + func TestGetScrapeConfigs(t *testing.T) { - sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration, convertClassicHistToNHCB bool) *ScrapeConfig { + // Helper function to create a scrape config with the given options. 
+ sc := func(opts ScrapeConfigOptions) *ScrapeConfig { return &ScrapeConfig{ - JobName: jobName, + JobName: opts.JobName, HonorTimestamps: true, - ScrapeInterval: scrapeInterval, - ScrapeTimeout: scrapeTimeout, + ScrapeInterval: opts.ScrapeInterval, + ScrapeTimeout: opts.ScrapeTimeout, ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, @@ -2380,7 +2422,8 @@ func TestGetScrapeConfigs(t *testing.T) { }, }, }, - ConvertClassicHistogramsToNHCB: boolPtr(convertClassicHistToNHCB), + AlwaysScrapeClassicHistograms: boolPtr(opts.AlwaysScrapeClassicHistograms), + ConvertClassicHistogramsToNHCB: boolPtr(opts.ConvertClassicHistToNHCB), } } @@ -2393,20 +2436,20 @@ func TestGetScrapeConfigs(t *testing.T) { { name: "An included config file should be a valid global config.", configFile: "testdata/scrape_config_files.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false)}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, }, { name: "A global config that only include a scrape config file.", configFile: "testdata/scrape_config_files_only.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false)}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, }, { name: "A global config that combine scrape config files and scrape configs.", configFile: "testdata/scrape_config_files_combined.good.yml", expectedResult: []*ScrapeConfig{ - sc("node", model.Duration(60*time.Second), model.Duration(10*time.Second), false), - sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false), - sc("alertmanager", model.Duration(60*time.Second), model.Duration(10*time.Second), false), + sc(ScrapeConfigOptions{JobName: "node", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}), + sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}), + sc(ScrapeConfigOptions{JobName: "alertmanager", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false}), }, }, { @@ -2422,6 +2465,7 @@ func TestGetScrapeConfigs(t *testing.T) { ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), ConvertClassicHistogramsToNHCB: boolPtr(false), MetricsPath: DefaultScrapeConfig.MetricsPath, @@ -2458,6 +2502,7 @@ func TestGetScrapeConfigs(t *testing.T) { ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols, MetricNameValidationScheme: UTF8ValidationConfig, MetricNameEscapingScheme: model.AllowUTF8, + AlwaysScrapeClassicHistograms: boolPtr(false), 
ConvertClassicHistogramsToNHCB: boolPtr(false), HTTPClientConfig: config.HTTPClientConfig{ @@ -2509,17 +2554,37 @@ func TestGetScrapeConfigs(t *testing.T) { { name: "A global config that enables convert classic histograms to nhcb.", configFile: "testdata/global_convert_classic_hist_to_nhcb.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), true)}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})}, }, { name: "A global config that enables convert classic histograms to nhcb and scrape config that disables the conversion", configFile: "testdata/local_disable_convert_classic_hist_to_nhcb.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), false)}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, }, { name: "A global config that disables convert classic histograms to nhcb and scrape config that enables the conversion", configFile: "testdata/local_convert_classic_hist_to_nhcb.good.yml", - expectedResult: []*ScrapeConfig{sc("prometheus", model.Duration(60*time.Second), model.Duration(10*time.Second), true)}, + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: true})}, + }, + { + name: "A global config that enables always scrape classic histograms", + configFile: "testdata/global_enable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that disables always scrape classic histograms", + configFile: "testdata/global_disable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that disables always scrape classic histograms and scrape config that enables it", + configFile: "testdata/local_enable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: true, ConvertClassicHistToNHCB: false})}, + }, + { + name: "A global config that enables always scrape classic histograms and scrape config that disables it", + configFile: "testdata/local_disable_always_scrape_classic_hist.good.yml", + expectedResult: []*ScrapeConfig{sc(ScrapeConfigOptions{JobName: "prometheus", ScrapeInterval: model.Duration(60 * time.Second), ScrapeTimeout: model.Duration(10 * time.Second), AlwaysScrapeClassicHistograms: false, ConvertClassicHistToNHCB: false})}, }, } diff --git 
a/config/testdata/global_disable_always_scrape_classic_hist.good.yml b/config/testdata/global_disable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..de28f1357a --- /dev/null +++ b/config/testdata/global_disable_always_scrape_classic_hist.good.yml @@ -0,0 +1,6 @@ +global: + always_scrape_classic_histograms: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/global_enable_always_scrape_classic_hist.good.yml b/config/testdata/global_enable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..d42cf69cb6 --- /dev/null +++ b/config/testdata/global_enable_always_scrape_classic_hist.good.yml @@ -0,0 +1,6 @@ +global: + always_scrape_classic_histograms: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] diff --git a/config/testdata/local_disable_always_scrape_classic_hist.good.yml b/config/testdata/local_disable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..9e668340dc --- /dev/null +++ b/config/testdata/local_disable_always_scrape_classic_hist.good.yml @@ -0,0 +1,7 @@ +global: + always_scrape_classic_histograms: true +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + always_scrape_classic_histograms: false diff --git a/config/testdata/local_enable_always_scrape_classic_hist.good.yml b/config/testdata/local_enable_always_scrape_classic_hist.good.yml new file mode 100644 index 0000000000..165be07754 --- /dev/null +++ b/config/testdata/local_enable_always_scrape_classic_hist.good.yml @@ -0,0 +1,7 @@ +global: + always_scrape_classic_histograms: false +scrape_configs: + - job_name: prometheus + static_configs: + - targets: ['localhost:8080'] + always_scrape_classic_histograms: true diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index aa92b40fd8..e701356506 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -144,6 +144,11 @@ global: # histograms with custom buckets. [ convert_classic_histograms_to_nhcb | default = false] + # Specifies whether to scrape a classic histogram, even if it is also exposed as a native + # histogram (has no effect without --enable-feature=native-histograms). + [ always_scrape_classic_histograms: | default = false ] + + runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC @@ -244,7 +249,8 @@ job_name: # Whether to scrape a classic histogram, even if it is also exposed as a native # histogram (has no effect without --enable-feature=native-histograms). -[ always_scrape_classic_histograms: | default = false ] +[ always_scrape_classic_histograms: | +default = ] # The HTTP resource path on which to fetch metrics from targets. 
[ metrics_path: | default = /metrics ] diff --git a/scrape/scrape.go b/scrape/scrape.go index 704726bf41..1c4b5c565d 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -366,7 +366,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() - alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms + alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled() convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled() ) @@ -522,7 +522,7 @@ func (sp *scrapePool) sync(targets []*Target) { trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() - alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms + alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistogramsEnabled() convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCBEnabled() ) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 3ddb767356..8cf84a4aec 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -4635,26 +4635,26 @@ metric: < fals := false for metricsTextName, metricsText := range metricsTexts { for name, tc := range map[string]struct { - alwaysScrapeClassicHistograms bool + alwaysScrapeClassicHistograms *bool convertClassicHistToNHCB *bool }{ "convert with scrape": { - alwaysScrapeClassicHistograms: true, + alwaysScrapeClassicHistograms: &tru, convertClassicHistToNHCB: &tru, }, "convert without scrape": { - alwaysScrapeClassicHistograms: false, + alwaysScrapeClassicHistograms: &fals, convertClassicHistToNHCB: &tru, }, "scrape without convert": { - alwaysScrapeClassicHistograms: true, + alwaysScrapeClassicHistograms: &tru, convertClassicHistToNHCB: &fals, }, "scrape with nil convert": { - alwaysScrapeClassicHistograms: true, + alwaysScrapeClassicHistograms: &tru, }, "neither scrape nor convert": { - alwaysScrapeClassicHistograms: false, + alwaysScrapeClassicHistograms: &fals, convertClassicHistToNHCB: &fals, }, } { @@ -4664,7 +4664,7 @@ metric: < expectedNativeHistCount = 1 expectCustomBuckets = false expectedClassicHistCount = 0 - if metricsText.hasClassic && tc.alwaysScrapeClassicHistograms { + if metricsText.hasClassic && tc.alwaysScrapeClassicHistograms != nil && *tc.alwaysScrapeClassicHistograms { expectedClassicHistCount = 1 } } else if metricsText.hasClassic { @@ -4672,11 +4672,11 @@ metric: < case tc.convertClassicHistToNHCB == nil || !*tc.convertClassicHistToNHCB: expectedClassicHistCount = 1 expectedNativeHistCount = 0 - case tc.alwaysScrapeClassicHistograms && *tc.convertClassicHistToNHCB: + case tc.alwaysScrapeClassicHistograms != nil && *tc.alwaysScrapeClassicHistograms && *tc.convertClassicHistToNHCB: expectedClassicHistCount = 1 expectedNativeHistCount = 1 expectCustomBuckets = true - case !tc.alwaysScrapeClassicHistograms && *tc.convertClassicHistToNHCB: + case (tc.alwaysScrapeClassicHistograms == nil || !*tc.alwaysScrapeClassicHistograms) && *tc.convertClassicHistToNHCB: expectedClassicHistCount = 0 expectedNativeHistCount = 1 expectCustomBuckets = true From a177376d722e1717a357391fec345088f9a29152 Mon Sep 17 00:00:00 2001 From: Neeraj Gartia Date: Mon, 5 May 2025 23:23:35 +0530 Subject: [PATCH 23/39] fix promql-test error message and readme Signed-off-by: Neeraj Gartia --- promql/promqltest/README.md | 10 +++++----- 
promql/promqltest/test.go | 2 +- promql/promqltest/test_test.go | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/promql/promqltest/README.md b/promql/promqltest/README.md index 8854e74724..5ac0d02adb 100644 --- a/promql/promqltest/README.md +++ b/promql/promqltest/README.md @@ -111,7 +111,7 @@ eval range from to step ### `expect` Syntax ``` -expect +expect : ``` #### Parameters @@ -139,8 +139,8 @@ eval instant at 1m sum by (env) (my_metric) {env="test"} 20 eval range from 0 to 3m step 1m sum by (env) (my_metric) - expect warn msg something went wrong - expect info regex something went (wrong|boom) + expect warn msg: something went wrong + expect info regex: something went (wrong|boom) {env="prod"} 2 5 10 20 {env="test"} 10 20 30 45 @@ -148,10 +148,10 @@ eval instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) expect fail eval instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) -expect fail msg "vector cannot contain metrics with the same labelset" +expect fail msg: "vector cannot contain metrics with the same labelset" eval instant at 1m ceil({__name__=~'testmetric1|testmetric2'}) -expect fail regex "vector cannot contain metrics .*|something else went wrong" +expect fail regex: "vector cannot contain metrics .*|something else went wrong" eval instant at 1m sum by (env) (my_metric) expect ordered diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 7e781121b8..1754f6635d 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -271,7 +271,7 @@ func parseExpect(defLine string) (expectCmdType, expectCmd, error) { expectParts := patExpect.FindStringSubmatch(strings.TrimSpace(defLine)) expCmd := expectCmd{} if expectParts == nil { - return 0, expCmd, errors.New("invalid expect statement, must match `expect ` format") + return 0, expCmd, errors.New("invalid expect statement, must match `expect : ` format") } var ( mode = expectParts[1] diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index 184cfbee1a..333d665680 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -699,7 +699,7 @@ load 5m eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'}) expect fail error: something went wrong `, - expectedError: "error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid expect statement, must match `expect ` format", + expectedError: "error in eval ceil({__name__=~'testmetric1|testmetric2'}) (line 7): invalid expect statement, must match `expect : ` format", }, "instant query expected not to care about annotations (with new eval syntax)": { input: ` From b59b76639f3a62b2fa8430893d33bc48df87cd4f Mon Sep 17 00:00:00 2001 From: machine424 Date: Mon, 5 May 2025 14:27:23 +0200 Subject: [PATCH 24/39] chore(scripts/sync_repo_files): add link to source script Signed-off-by: machine424 --- scripts/sync_repo_files.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index 1029336298..09b0e4d93a 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -10,7 +10,7 @@ git_user="prombot" branch="repo_sync" commit_msg="Update common Prometheus files" pr_title="Synchronize common files from prometheus/prometheus" -pr_msg="Propagating changes from prometheus/prometheus default branch." 
+pr_msg="Propagating changes from prometheus/prometheus default branch.\n\n*Source can be found [here](https://github.com/prometheus/prometheus/blob/main/scripts/sync_repo_files.sh).*" orgs="prometheus prometheus-community" color_red='\e[31m' From 7e49b91d9a6a409100addd635c3d1e5d8df10719 Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Tue, 6 May 2025 15:45:16 +0200 Subject: [PATCH 25/39] tsdb/errors.MultiError: support errors.As (#16544) * tsdb/errors.MultiError: implement Unwrap the multierror was hiding some errors in Mimir. I also added unit tests because I had them handy from a similar change I and yuri did in XXX and some time ago --------- Signed-off-by: Dimitar Dimitrov Co-authored-by: Arve Knudsen --- tsdb/errors/errors.go | 5 ++ tsdb/errors/errors_test.go | 172 +++++++++++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 tsdb/errors/errors_test.go diff --git a/tsdb/errors/errors.go b/tsdb/errors/errors.go index a86ce59bd8..ded4ae3a27 100644 --- a/tsdb/errors/errors.go +++ b/tsdb/errors/errors.go @@ -94,6 +94,11 @@ func (es nonNilMultiError) Is(target error) bool { return false } +// Unwrap returns the list of errors contained in the multiError. +func (es nonNilMultiError) Unwrap() []error { + return es.errs +} + // CloseAll closes all given closers while recording error in MultiError. func CloseAll(cs []io.Closer) error { errs := NewMulti() diff --git a/tsdb/errors/errors_test.go b/tsdb/errors/errors_test.go new file mode 100644 index 0000000000..146c66bf00 --- /dev/null +++ b/tsdb/errors/errors_test.go @@ -0,0 +1,172 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMultiError_Is(t *testing.T) { + customErr1 := errors.New("test error 1") + customErr2 := errors.New("test error 2") + + testCases := map[string]struct { + sourceErrors []error + target error + is bool + }{ + "adding a context cancellation doesn't lose the information": { + sourceErrors: []error{context.Canceled}, + target: context.Canceled, + is: true, + }, + "adding multiple context cancellations doesn't lose the information": { + sourceErrors: []error{context.Canceled, context.Canceled}, + target: context.Canceled, + is: true, + }, + "adding wrapped context cancellations doesn't lose the information": { + sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled)}, + target: context.Canceled, + is: true, + }, + "adding a nil error doesn't lose the information": { + sourceErrors: []error{errors.New("some error"), fmt.Errorf("some message: %w", context.Canceled), nil}, + target: context.Canceled, + is: true, + }, + "errors with no context cancellation error are not a context canceled error": { + sourceErrors: []error{errors.New("first error"), errors.New("second error")}, + target: context.Canceled, + is: false, + }, + "no errors are not a context canceled error": { + sourceErrors: nil, + target: context.Canceled, + is: false, + }, + "no errors are a nil error": { + sourceErrors: nil, + target: nil, + is: true, + }, + "nested multi-error contains customErr1": { + sourceErrors: []error{ + customErr1, + NewMulti( + customErr2, + fmt.Errorf("wrapped %w", context.Canceled), + ).Err(), + }, + target: customErr1, + is: true, + }, + "nested multi-error contains customErr2": { + sourceErrors: []error{ + customErr1, + NewMulti( + customErr2, + fmt.Errorf("wrapped %w", context.Canceled), + ).Err(), + }, + target: customErr2, + is: true, + }, + "nested multi-error contains wrapped context.Canceled": { + sourceErrors: []error{ + customErr1, + NewMulti( + customErr2, + fmt.Errorf("wrapped %w", context.Canceled), + ).Err(), + }, + target: context.Canceled, + is: true, + }, + "nested multi-error does not contain context.DeadlineExceeded": { + sourceErrors: []error{ + customErr1, + NewMulti( + customErr2, + fmt.Errorf("wrapped %w", context.Canceled), + ).Err(), + }, + target: context.DeadlineExceeded, + is: false, // make sure we still return false in valid cases + }, + } + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + mErr := NewMulti(testCase.sourceErrors...) 
+ require.Equal(t, testCase.is, errors.Is(mErr.Err(), testCase.target)) + }) + } +} + +func TestMultiError_As(t *testing.T) { + tE1 := testError{"error cause 1"} + tE2 := testError{"error cause 2"} + var target testError + testCases := map[string]struct { + sourceErrors []error + target error + as bool + }{ + "MultiError containing only a testError can be cast to that testError": { + sourceErrors: []error{tE1}, + target: tE1, + as: true, + }, + "MultiError containing multiple testErrors can be cast to the first testError added": { + sourceErrors: []error{tE1, tE2}, + target: tE1, + as: true, + }, + "MultiError containing multiple errors can be cast to the first testError added": { + sourceErrors: []error{context.Canceled, tE1, context.DeadlineExceeded, tE2}, + target: tE1, + as: true, + }, + "MultiError not containing a testError cannot be cast to a testError": { + sourceErrors: []error{context.Canceled, context.DeadlineExceeded}, + as: false, + }, + } + + for testName, testCase := range testCases { + t.Run(testName, func(t *testing.T) { + mErr := NewMulti(testCase.sourceErrors...).Err() + if testCase.as { + require.ErrorAs(t, mErr, &target) + require.Equal(t, testCase.target, target) + } else { + require.NotErrorAs(t, mErr, &target) + } + }) + } +} + +type testError struct { + cause string +} + +func (e testError) Error() string { + return fmt.Sprintf("testError[cause: %s]", e.cause) +} From ca70ed4fed3b5f41fe8fb14d27244a21c2c2b61a Mon Sep 17 00:00:00 2001 From: machine424 Date: Tue, 29 Oct 2024 16:43:50 +0100 Subject: [PATCH 26/39] test(cmd): add test for GOGC setting As suggested in https://github.com/prometheus/prometheus/pull/14176#issuecomment-2150308054 Signed-off-by: machine424 --- cmd/prometheus/main_test.go | 120 ++++++++++++++++++++++++++++++++++ cmd/prometheus/reload_test.go | 16 +++-- 2 files changed, 131 insertions(+), 5 deletions(-) diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 5bb72dd2c2..eb96b6fa96 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "math" + "net/http" "os" "os/exec" "path/filepath" @@ -33,6 +34,7 @@ import ( "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" @@ -41,6 +43,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/util/testutil" ) func init() { @@ -646,3 +649,120 @@ func TestRwProtoMsgFlagParser(t *testing.T) { }) } } + +func getGaugeValue(t *testing.T, body io.ReadCloser, metricName string) (float64, error) { + t.Helper() + + p := expfmt.TextParser{} + metricFamilies, err := p.TextToMetricFamilies(body) + if err != nil { + return 0, err + } + metricFamily, ok := metricFamilies[metricName] + if !ok { + return 0, errors.New("metric family not found") + } + metric := metricFamily.GetMetric() + if len(metric) != 1 { + return 0, errors.New("metric not found") + } + return metric[0].GetGauge().GetValue(), nil +} + +func TestRuntimeGOGCConfig(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + t.Parallel() + + for _, tc := range []struct { + name string + config string + gogcEnvVar string + expectedGOGC int + }{ + { + name: "empty config file", + expectedGOGC: 75, + }, + // the GOGC env var is ignored in this case, 
see https://github.com/prometheus/prometheus/issues/16334 + /* { + name: "empty config file with GOGC env var set", + gogcEnvVar: "66", + expectedGOGC: 66, + }, */ + { + name: "gogc set through config", + config: ` +runtime: + gogc: 77`, + expectedGOGC: 77, + }, + { + name: "gogc set through config and env var", + config: ` +runtime: + gogc: 77`, + gogcEnvVar: "88", + expectedGOGC: 77, + }, + { + name: "incomplete runtime block", + config: ` +runtime:`, + expectedGOGC: 75, + }, + { + name: "incomplete runtime block and GOGC env var set", + config: ` +runtime:`, + gogcEnvVar: "88", + expectedGOGC: 88, + }, + // the GOGC env var is ignored in this case, see https://github.com/prometheus/prometheus/issues/16334 + /* { + name: "unrelated config and GOGC env var set", + config: ` + global: + scrape_interval: 500ms`, + gogcEnvVar: "80", + expectedGOGC: 80, + }, */ + } { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + tmpDir := t.TempDir() + configFile := filepath.Join(tmpDir, "prometheus.yml") + + port := testutil.RandomUnprivilegedPort(t) + os.WriteFile(configFile, []byte(tc.config), 0o777) + prom := prometheusCommandWithLogging(t, configFile, port, fmt.Sprintf("--storage.tsdb.path=%s", tmpDir)) + // Inject GOGC when set. + prom.Env = os.Environ() + if tc.gogcEnvVar != "" { + prom.Env = append(prom.Env, fmt.Sprintf("GOGC=%s", tc.gogcEnvVar)) + } + require.NoError(t, prom.Start()) + + var ( + r *http.Response + err error + ) + // Wait for the /metrics endpoint to be ready. + require.Eventually(t, func() bool { + r, err = http.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", port)) + if err != nil { + return false + } + return r.StatusCode == http.StatusOK + }, 5*time.Second, 50*time.Millisecond) + defer r.Body.Close() + + // Check the final GOGC that's set, consider go_gc_gogc_percent from /metrics as source of truth. + gogc, err := getGaugeValue(t, r.Body, "go_gc_gogc_percent") + require.NoError(t, err) + require.Equal(t, float64(tc.expectedGOGC), gogc) + }) + } +} diff --git a/cmd/prometheus/reload_test.go b/cmd/prometheus/reload_test.go index 18a7ff2ad1..c59e51b316 100644 --- a/cmd/prometheus/reload_test.go +++ b/cmd/prometheus/reload_test.go @@ -119,7 +119,8 @@ func runTestSteps(t *testing.T, steps []struct { require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file") port := testutil.RandomUnprivilegedPort(t) - runPrometheusWithLogging(t, configFilePath, port) + prom := prometheusCommandWithLogging(t, configFilePath, port, "--enable-feature=auto-reload-config", "--config.auto-reload-interval=1s") + require.NoError(t, prom.Start()) baseURL := "http://localhost:" + strconv.Itoa(port) require.Eventually(t, func() bool { @@ -197,14 +198,20 @@ func captureLogsToTLog(t *testing.T, r io.Reader) { } } -func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) { +func prometheusCommandWithLogging(t *testing.T, configFilePath string, port int, extraArgs ...string) *exec.Cmd { stdoutPipe, stdoutWriter := io.Pipe() stderrPipe, stderrWriter := io.Pipe() var wg sync.WaitGroup wg.Add(2) - prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port)) + args := []string{ + "-test.main", + "--config.file=" + configFilePath, + "--web.listen-address=0.0.0.0:" + strconv.Itoa(port), + } + args = append(args, extraArgs...) + prom := exec.Command(promPath, args...) 
prom.Stdout = stdoutWriter prom.Stderr = stderrWriter @@ -224,6 +231,5 @@ func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) { stderrWriter.Close() wg.Wait() }) - - require.NoError(t, prom.Start()) + return prom } From b07b5521395e9a6ed008895a7b3ca1d785a1467e Mon Sep 17 00:00:00 2001 From: Andre Branchizio Date: Tue, 6 May 2025 11:54:48 -0600 Subject: [PATCH 27/39] [PERF] TSDB: Pass down label value limit into implementation (#16158) * allow limiting label values calls Signed-off-by: Andre Branchizio --- cmd/promtool/tsdb.go | 6 +++--- tsdb/block.go | 16 ++++++++-------- tsdb/block_test.go | 6 +++--- tsdb/head_read.go | 10 +++++----- tsdb/head_test.go | 10 +++++----- tsdb/index/index.go | 18 ++++++++++++++---- tsdb/index/index_test.go | 2 +- tsdb/index/postings.go | 6 +++++- tsdb/ooo_head_read.go | 10 +++++----- tsdb/ooo_head_read_test.go | 8 ++++---- tsdb/querier.go | 15 +++++++++++---- tsdb/querier_bench_test.go | 2 +- tsdb/querier_test.go | 22 ++++++++++++++-------- 13 files changed, 79 insertions(+), 52 deletions(-) diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index fe490bbeaf..f512728ac9 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -552,7 +552,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten postingInfos = postingInfos[:0] for _, n := range allLabelNames { - values, err := ir.SortedLabelValues(ctx, n, selectors...) + values, err := ir.SortedLabelValues(ctx, n, nil, selectors...) if err != nil { return err } @@ -568,7 +568,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten postingInfos = postingInfos[:0] for _, n := range allLabelNames { - lv, err := ir.SortedLabelValues(ctx, n, selectors...) + lv, err := ir.SortedLabelValues(ctx, n, nil, selectors...) if err != nil { return err } @@ -578,7 +578,7 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten printInfo(postingInfos) postingInfos = postingInfos[:0] - lv, err := ir.SortedLabelValues(ctx, "__name__", selectors...) + lv, err := ir.SortedLabelValues(ctx, "__name__", nil, selectors...) if err != nil { return err } diff --git a/tsdb/block.go b/tsdb/block.go index 7f7d993800..7d243f8bf7 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -66,10 +66,10 @@ type IndexReader interface { Symbols() index.StringIter // SortedLabelValues returns sorted possible label values. - SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) + SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) // LabelValues returns possible label values which may not be sorted. - LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) + LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) // Postings returns the postings list iterator for the label pairs. // The Postings here contain the offsets to the series inside the index. 
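The guard repeated throughout this patch (apply `hints.Limit` only when the hints are non-nil and the limit is positive) can be read on its own as a small helper. A sketch assuming only the `storage.LabelHints` type used above; `truncateToHint` itself is hypothetical and does not exist in the patch:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// truncateToHint applies the optional label-values limit the same way the
// patched readers do: a nil hint or a non-positive limit means "no limit".
func truncateToHint(values []string, hints *storage.LabelHints) []string {
	if hints != nil && hints.Limit > 0 && len(values) > hints.Limit {
		return values[:hints.Limit]
	}
	return values
}

func main() {
	values := []string{"a", "b", "c", "d"}
	fmt.Println(truncateToHint(values, &storage.LabelHints{Limit: 2})) // [a b]
	fmt.Println(truncateToHint(values, nil))                           // [a b c d]
}
```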
@@ -475,14 +475,14 @@ func (r blockIndexReader) Symbols() index.StringIter { return r.ir.Symbols() } -func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { var st []string var err error if len(matchers) == 0 { - st, err = r.ir.SortedLabelValues(ctx, name) + st, err = r.ir.SortedLabelValues(ctx, name, hints) } else { - st, err = r.LabelValues(ctx, name, matchers...) + st, err = r.LabelValues(ctx, name, hints, matchers...) if err == nil { slices.Sort(st) } @@ -493,16 +493,16 @@ func (r blockIndexReader) SortedLabelValues(ctx context.Context, name string, ma return st, nil } -func (r blockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (r blockIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) == 0 { - st, err := r.ir.LabelValues(ctx, name) + st, err := r.ir.LabelValues(ctx, name, hints) if err != nil { return st, fmt.Errorf("block: %s: %w", r.b.Meta().ULID, err) } return st, nil } - return labelValuesWithMatchers(ctx, r.ir, name, matchers...) + return labelValuesWithMatchers(ctx, r.ir, name, hints, matchers...) } func (r blockIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, error) { diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 776beb4396..0f892a3782 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -299,11 +299,11 @@ func TestLabelValuesWithMatchers(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - actualValues, err := indexReader.SortedLabelValues(ctx, tt.labelName, tt.matchers...) + actualValues, err := indexReader.SortedLabelValues(ctx, tt.labelName, nil, tt.matchers...) require.NoError(t, err) require.Equal(t, tt.expectedValues, actualValues) - actualValues, err = indexReader.LabelValues(ctx, tt.labelName, tt.matchers...) + actualValues, err = indexReader.LabelValues(ctx, tt.labelName, nil, tt.matchers...) sort.Strings(actualValues) require.NoError(t, err) require.Equal(t, tt.expectedValues, actualValues) @@ -459,7 +459,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { b.ReportAllocs() for benchIdx := 0; benchIdx < b.N; benchIdx++ { - actualValues, err := indexReader.LabelValues(ctx, "b_tens", matchers...) + actualValues, err := indexReader.LabelValues(ctx, "b_tens", nil, matchers...) require.NoError(b, err) require.Len(b, actualValues, 9) } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index f37fd17d60..20495c30b3 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -61,8 +61,8 @@ func (h *headIndexReader) Symbols() index.StringIter { // specific label name that are within the time range mint to maxt. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. -func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { - values, err := h.LabelValues(ctx, name, matchers...) +func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + values, err := h.LabelValues(ctx, name, hints, matchers...) 
if err == nil { slices.Sort(values) } @@ -73,16 +73,16 @@ func (h *headIndexReader) SortedLabelValues(ctx context.Context, name string, ma // specific label name that are within the time range mint to maxt. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. -func (h *headIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (h *headIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { return []string{}, nil } if len(matchers) == 0 { - return h.head.postings.LabelValues(ctx, name), nil + return h.head.postings.LabelValues(ctx, name, hints), nil } - return labelValuesWithMatchers(ctx, h, name, matchers...) + return labelValuesWithMatchers(ctx, h, name, hints, matchers...) } // LabelNames returns all the unique label names present in the head diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 561c8c789d..485b8b7b1f 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -1216,7 +1216,7 @@ func TestHead_Truncate(t *testing.T) { ss = map[string]struct{}{} values[name] = ss } - for _, value := range h.postings.LabelValues(ctx, name) { + for _, value := range h.postings.LabelValues(ctx, name, nil) { ss[value] = struct{}{} } } @@ -3136,7 +3136,7 @@ func TestHeadLabelNamesValuesWithMinMaxRange(t *testing.T) { require.Equal(t, tt.expectedNames, actualLabelNames) if len(tt.expectedValues) > 0 { for i, name := range expectedLabelNames { - actualLabelValue, err := headIdxReader.SortedLabelValues(ctx, name) + actualLabelValue, err := headIdxReader.SortedLabelValues(ctx, name, nil) require.NoError(t, err) require.Equal(t, []string{tt.expectedValues[i]}, actualLabelValue) } @@ -3209,11 +3209,11 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) { t.Run(tt.name, func(t *testing.T) { headIdxReader := head.indexRange(0, 200) - actualValues, err := headIdxReader.SortedLabelValues(ctx, tt.labelName, tt.matchers...) + actualValues, err := headIdxReader.SortedLabelValues(ctx, tt.labelName, nil, tt.matchers...) require.NoError(t, err) require.Equal(t, tt.expectedValues, actualValues) - actualValues, err = headIdxReader.LabelValues(ctx, tt.labelName, tt.matchers...) + actualValues, err = headIdxReader.LabelValues(ctx, tt.labelName, nil, tt.matchers...) sort.Strings(actualValues) require.NoError(t, err) require.Equal(t, tt.expectedValues, actualValues) @@ -3472,7 +3472,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) { b.ReportAllocs() for benchIdx := 0; benchIdx < b.N; benchIdx++ { - actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", matchers...) + actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", nil, matchers...) require.NoError(b, err) require.Len(b, actualValues, 9) } diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 42ecd7245d..edcb92a719 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -1493,8 +1493,8 @@ func (r *Reader) SymbolTableSize() uint64 { // SortedLabelValues returns value tuples that exist for the given label name. // It is not safe to use the return value beyond the lifetime of the byte slice // passed into the Reader. -func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { - values, err := r.LabelValues(ctx, name, matchers...) 
+func (r *Reader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + values, err := r.LabelValues(ctx, name, hints, matchers...) if err == nil && r.version == FormatV1 { slices.Sort(values) } @@ -1505,7 +1505,7 @@ func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers .. // It is not safe to use the return value beyond the lifetime of the byte slice // passed into the Reader. // TODO(replay): Support filtering by matchers. -func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (r *Reader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { return nil, fmt.Errorf("matchers parameter is not implemented: %+v", matchers) } @@ -1517,6 +1517,9 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe } values := make([]string, 0, len(e)) for k := range e { + if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit { + break + } values = append(values, k) } return values, nil @@ -1529,9 +1532,16 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe return nil, nil } - values := make([]string, 0, len(e)*symbolFactor) + valuesLength := len(e) * symbolFactor + if hints != nil && hints.Limit > 0 && valuesLength > hints.Limit { + valuesLength = hints.Limit + } + values := make([]string, 0, valuesLength) lastVal := e[len(e)-1].value err := r.traversePostingOffsets(ctx, e[0].off, func(val string, _ uint64) (bool, error) { + if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit { + return false, nil + } values = append(values, val) return val != lastVal, nil }) diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index e3fe5a41fd..17b4cc88dd 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -421,7 +421,7 @@ func TestPersistence_index_e2e(t *testing.T) { for k, v := range labelPairs { sort.Strings(v) - res, err := ir.SortedLabelValues(ctx, k) + res, err := ir.SortedLabelValues(ctx, k, nil) require.NoError(t, err) require.Len(t, res, len(v)) diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index e3ba5d64b4..7fdf64acca 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -168,11 +168,15 @@ func (p *MemPostings) LabelNames() []string { } // LabelValues returns label values for the given name. -func (p *MemPostings) LabelValues(_ context.Context, name string) []string { +func (p *MemPostings) LabelValues(_ context.Context, name string, hints *storage.LabelHints) []string { p.mtx.RLock() values := p.lvs[name] p.mtx.RUnlock() + if hints != nil && hints.Limit > 0 && len(values) > hints.Limit { + values = values[:hints.Limit] + } + // The slice from p.lvs[name] is shared between all readers, and it is append-only. // Since it's shared, we need to make a copy of it before returning it to make // sure that no caller modifies the original one by sorting it or filtering it. diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 5eb63edfd5..ddc5376df0 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -176,16 +176,16 @@ type multiMeta struct { // LabelValues needs to be overridden from the headIndexReader implementation // so we can return labels within either in-order range or ooo range. 
-func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() { return []string{}, nil } if len(matchers) == 0 { - return oh.head.postings.LabelValues(ctx, name), nil + return oh.head.postings.LabelValues(ctx, name, hints), nil } - return labelValuesWithMatchers(ctx, oh, name, matchers...) + return labelValuesWithMatchers(ctx, oh, name, hints, matchers...) } func lessByMinTimeAndMinRef(a, b chunks.Meta) int { @@ -484,11 +484,11 @@ func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *l return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, 0, chks) } -func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } -func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ ...*labels.Matcher) ([]string, error) { +func (ir *OOOCompactionHeadIndexReader) LabelValues(_ context.Context, _ string, _ *storage.LabelHints, _ ...*labels.Matcher) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 9dcf125b92..4fd29d0b1b 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -452,24 +452,24 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari // We first want to test using a head index reader that covers the biggest query interval oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMinT, tc.queryMaxT, 0) matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} - values, err := oh.LabelValues(ctx, "foo", matchers...) + values, err := oh.LabelValues(ctx, "foo", nil, matchers...) sort.Strings(values) require.NoError(t, err) require.Equal(t, tc.expValues1, values) matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^bar.")} - values, err = oh.LabelValues(ctx, "foo", matchers...) + values, err = oh.LabelValues(ctx, "foo", nil, matchers...) sort.Strings(values) require.NoError(t, err) require.Equal(t, tc.expValues2, values) matchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.")} - values, err = oh.LabelValues(ctx, "foo", matchers...) + values, err = oh.LabelValues(ctx, "foo", nil, matchers...) 
sort.Strings(values) require.NoError(t, err) require.Equal(t, tc.expValues3, values) - values, err = oh.LabelValues(ctx, "foo") + values, err = oh.LabelValues(ctx, "foo", nil) sort.Strings(values) require.NoError(t, err) require.Equal(t, tc.expValues4, values) diff --git a/tsdb/querier.go b/tsdb/querier.go index f7d564a2dd..0943c760cd 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -77,8 +77,8 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er }, nil } -func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, _ *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - res, err := q.index.SortedLabelValues(ctx, name, matchers...) +func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + res, err := q.index.SortedLabelValues(ctx, name, hints, matchers...) return res, nil, err } @@ -390,8 +390,9 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma return it, it.Err() } -func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) { - allValues, err := r.LabelValues(ctx, name) +func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + // Limit is applied at the end, after filtering. + allValues, err := r.LabelValues(ctx, name, nil) if err != nil { return nil, fmt.Errorf("fetching values of label %s: %w", name, err) } @@ -428,6 +429,9 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma // If we don't have any matchers for other labels, then we're done. if !hasMatchersForOtherLabels { + if hints != nil && hints.Limit > 0 && len(allValues) > hints.Limit { + allValues = allValues[:hints.Limit] + } return allValues, nil } @@ -451,6 +455,9 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma values := make([]string, 0, len(indexes)) for _, idx := range indexes { values = append(values, allValues[idx]) + if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit { + break + } } return values, nil diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index f5cc62d961..511166d2b5 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -228,7 +228,7 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) { for _, c := range cases { b.Run(c.name, func(b *testing.B) { for i := 0; i < b.N; i++ { - _, err := labelValuesWithMatchers(ctx, ir, c.labelName, c.matchers...) + _, err := labelValuesWithMatchers(ctx, ir, c.labelName, nil, c.matchers...) require.NoError(b, err) } }) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index cb96fa3716..cd3b15abc4 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2252,19 +2252,22 @@ func (m mockIndex) Close() error { return nil } -func (m mockIndex) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { - values, _ := m.LabelValues(ctx, name, matchers...) +func (m mockIndex) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { + values, _ := m.LabelValues(ctx, name, hints, matchers...) 
sort.Strings(values) return values, nil } -func (m mockIndex) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (m mockIndex) LabelValues(_ context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { var values []string if len(matchers) == 0 { for l := range m.postings { if l.Name == name { values = append(values, l.Value) + if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit { + break + } } } return values, nil @@ -2275,6 +2278,9 @@ func (m mockIndex) LabelValues(_ context.Context, name string, matchers ...*labe if matcher.Matches(series.l.Get(matcher.Name)) { // TODO(colega): shouldn't we check all the matchers before adding this to the values? values = append(values, series.l.Get(name)) + if hints != nil && hints.Limit > 0 && len(values) >= hints.Limit { + break + } } } } @@ -3299,12 +3305,12 @@ func (m mockMatcherIndex) Symbols() index.StringIter { return nil } func (m mockMatcherIndex) Close() error { return nil } // SortedLabelValues will return error if it is called. -func (m mockMatcherIndex) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) { +func (m mockMatcherIndex) SortedLabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return []string{}, errors.New("sorted label values called") } // LabelValues will return error if it is called. -func (m mockMatcherIndex) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) { +func (m mockMatcherIndex) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return []string{}, errors.New("label values called") } @@ -3736,7 +3742,7 @@ func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations) ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} - _, err := labelValuesWithMatchers(ctx, ir, "__name__", labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+")) + _, err := labelValuesWithMatchers(ctx, ir, "__name__", nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+")) require.Error(t, err) require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. 
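On the ordering chosen in `labelValuesWithMatchers` above (the added comment notes that the limit is applied at the end, after filtering): truncating the candidate values before filtering could return fewer than `Limit` matching values even when enough exist. A standalone sketch with invented data, not code from the patch:

```go
package main

import (
	"fmt"
	"strings"
)

// filterThenLimit keeps values accepted by keep, stopping once limit
// matches have been collected (limit <= 0 means unlimited).
func filterThenLimit(values []string, keep func(string) bool, limit int) []string {
	out := []string{}
	for _, v := range values {
		if !keep(v) {
			continue
		}
		out = append(out, v)
		if limit > 0 && len(out) >= limit {
			break
		}
	}
	return out
}

func main() {
	values := []string{"dev", "prod-1", "prod-2", "staging"}
	isProd := func(v string) bool { return strings.HasPrefix(v, "prod") }

	// Filter first, then limit: both matching values are returned.
	fmt.Println(filterThenLimit(values, isProd, 2)) // [prod-1 prod-2]

	// Limiting the candidates first (values[:2]) leaves only one match.
	fmt.Println(filterThenLimit(values[:2], isProd, 2)) // [prod-1]
}
```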
@@ -3746,7 +3752,7 @@ type mockReaderOfLabels struct{} const mockReaderOfLabelsSeriesCount = checkContextEveryNIterations * 10 -func (m mockReaderOfLabels) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) { +func (m mockReaderOfLabels) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { return make([]string, mockReaderOfLabelsSeriesCount), nil } @@ -3754,7 +3760,7 @@ func (m mockReaderOfLabels) LabelValueFor(context.Context, storage.SeriesRef, st panic("LabelValueFor called") } -func (m mockReaderOfLabels) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) { +func (m mockReaderOfLabels) SortedLabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, error) { panic("SortedLabelValues called") } From 2bf6f4c9dcbb1ad2e8fef70c6a48d8fc44a7f57c Mon Sep 17 00:00:00 2001 From: dongjiang Date: Tue, 6 May 2025 19:38:46 +0800 Subject: [PATCH 28/39] update fix configuration.md Signed-off-by: dongjiang --- docs/configuration/configuration.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 8ac8bf56d1..058e5e750c 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -138,11 +138,11 @@ global: # Specifies the validation scheme for metric and label names. Either blank or # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, # and underscores. - [ metric_name_validation_scheme | default "utf8" ] + [ metric_name_validation_scheme: | default "utf8" ] # Specifies whether to convert all scraped classic histograms into native # histograms with custom buckets. - [ convert_classic_histograms_to_nhcb | default = false] + [ convert_classic_histograms_to_nhcb: | default = false] # Specifies whether to scrape a classic histogram, even if it is also exposed as a native # histogram (has no effect without --enable-feature=native-histograms). @@ -487,7 +487,7 @@ metric_relabel_configs: # Specifies the validation scheme for metric and label names. Either blank or # "utf8" for full UTF-8 support, or "legacy" for letters, numbers, colons, and # underscores. -[ metric_name_validation_scheme | default "utf8" ] +[ metric_name_validation_scheme: | default "utf8" ] # Specifies the character escaping scheme that will be requested when scraping # for metric and label names that do not conform to the legacy Prometheus @@ -503,7 +503,7 @@ metric_relabel_configs: # If this value is left blank, Prometheus will default to `allow-utf-8` if the # validation scheme for the current scrape config is set to utf8, or # `underscores` if the validation scheme is set to `legacy`. -[ metric_name_validation_scheme | default "utf8" ] +[ metric_name_validation_scheme: | default "utf8" ] # Limit on total number of positive and negative buckets allowed in a single # native histogram. The resolution of a histogram with more buckets will be @@ -554,7 +554,7 @@ metric_relabel_configs: # Specifies whether to convert classic histograms into native histograms with # custom buckets (has no effect without --enable-feature=native-histograms). 
-[ convert_classic_histograms_to_nhcb | default = +[ convert_classic_histograms_to_nhcb: | default = ] ``` From ba4b058b7ab60105e03f83380cc3200a8a66e52f Mon Sep 17 00:00:00 2001 From: hardlydearly <799511800@qq.com> Date: Wed, 30 Apr 2025 10:43:26 +0800 Subject: [PATCH 29/39] refactor: use slices.Contains to simplify code Signed-off-by: hardlydearly <799511800@qq.com> --- cmd/prometheus/main.go | 7 +++---- config/config.go | 9 ++++----- discovery/kubernetes/kubernetes.go | 12 ++---------- model/labels/labels_common.go | 6 ++---- model/labels/regexp.go | 22 ++++++---------------- rules/manager_test.go | 8 +++----- tsdb/index/postings.go | 6 ++---- 7 files changed, 22 insertions(+), 48 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 0e547deaf9..52c5194a91 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -30,6 +30,7 @@ import ( goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package. "runtime" "runtime/debug" + "slices" "strconv" "strings" "sync" @@ -1921,10 +1922,8 @@ func (p *rwProtoMsgFlagParser) Set(opt string) error { if err := t.Validate(); err != nil { return err } - for _, prev := range *p.msgs { - if prev == t { - return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs) - } + if slices.Contains(*p.msgs, t) { + return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs) } *p.msgs = append(*p.msgs, t) return nil diff --git a/config/config.go b/config/config.go index 5fbcbd8307..63464afe03 100644 --- a/config/config.go +++ b/config/config.go @@ -21,6 +21,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -1109,13 +1110,11 @@ func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error return err } - for _, supportedVersion := range SupportedAlertmanagerAPIVersions { - if *v == supportedVersion { - return nil - } + if !slices.Contains(SupportedAlertmanagerAPIVersions, *v) { + return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v) } - return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v) + return nil } const ( diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 03d9f2f449..2c4829ca8d 100644 --- a/discovery/kubernetes/kubernetes.go +++ b/discovery/kubernetes/kubernetes.go @@ -20,6 +20,7 @@ import ( "log/slog" "os" "reflect" + "slices" "strings" "sync" "time" @@ -210,18 +211,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if _, ok := allowedSelectors[c.Role]; !ok { return fmt.Errorf("invalid role: %q, expecting one of: pod, service, endpoints, endpointslice, node or ingress", c.Role) } - var allowed bool - for _, role := range allowedSelectors[c.Role] { - if role == string(selector.Role) { - allowed = true - break - } - } - - if !allowed { + if !slices.Contains(allowedSelectors[c.Role], string(selector.Role)) { return fmt.Errorf("%s role supports only %s selectors", c.Role, strings.Join(allowedSelectors[c.Role], ", ")) } - _, err := fields.ParseSelector(selector.Field) if err != nil { return err diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 005eaa509e..092783fbbd 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -167,10 +167,8 @@ func (b *Builder) Del(ns ...string) *Builder { // Keep removes all labels from the base except those 
with the given names. func (b *Builder) Keep(ns ...string) *Builder { b.base.Range(func(l Label) { - for _, n := range ns { - if l.Name == n { - return - } + if slices.Contains(ns, l.Name) { + return } b.del = append(b.del, l.Name) }) diff --git a/model/labels/regexp.go b/model/labels/regexp.go index cf6c9158e9..1636aacc21 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -95,12 +95,7 @@ func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { return func(s string) bool { if len(m.setMatches) != 0 { - for _, match := range m.setMatches { - if match == s { - return true - } - } - return false + return slices.Contains(m.setMatches, s) } if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { return false @@ -771,16 +766,11 @@ func (m *equalMultiStringSliceMatcher) setMatches() []string { func (m *equalMultiStringSliceMatcher) Matches(s string) bool { if m.caseSensitive { - for _, v := range m.values { - if s == v { - return true - } - } - } else { - for _, v := range m.values { - if strings.EqualFold(s, v) { - return true - } + return slices.Contains(m.values, s) + } + for _, v := range m.values { + if strings.EqualFold(s, v) { + return true } } return false diff --git a/rules/manager_test.go b/rules/manager_test.go index efd7a8b23c..54ca8ebfb3 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -20,6 +20,7 @@ import ( "math" "os" "path" + "slices" "sort" "strconv" "sync" @@ -1008,11 +1009,8 @@ func TestMetricsUpdate(t *testing.T) { var metrics int for _, m := range ms { s := m.GetName() - for _, n := range metricNames { - if s == n { - metrics += len(m.Metric) - break - } + if slices.Contains(metricNames, s) { + metrics += len(m.Metric) } } return metrics diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 7fdf64acca..75e3c2c148 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -599,10 +599,8 @@ func Intersect(its ...Postings) Postings { if len(its) == 1 { return its[0] } - for _, p := range its { - if p == EmptyPostings() { - return EmptyPostings() - } + if slices.Contains(its, EmptyPostings()) { + return EmptyPostings() } return newIntersectPostings(its...) From 591242901ac691d0f688cceb1c02e297c57478ba Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Sun, 11 May 2025 18:46:15 +0530 Subject: [PATCH 30/39] promql: Refactor some functions to make them more DRY (#16532) Signed-off-by: Neeraj Gartia --- promql/functions.go | 206 ++++++++++++++++---------------------------- 1 file changed, 72 insertions(+), 134 deletions(-) diff --git a/promql/functions.go b/promql/functions.go index 41526fcb16..272ed15893 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -612,7 +612,6 @@ func funcClampMin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper // === round(Vector parser.ValueTypeVector, toNearest=1 Scalar) (Vector, Annotations) === func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) // round returns a number rounded to toNearest. // Ties are solved by rounding up. toNearest := float64(1) @@ -621,23 +620,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper } // Invert as it seems to cause fewer floating point accuracy issues. toNearestInverse := 1.0 / toNearest - - for _, el := range vec { - if el.H != nil { - // Process only float samples. 
- continue - } - f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse - if !enh.enableDelayedNameRemoval { - el.Metric = el.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: el.Metric, - F: f, - DropName: true, - }) - } - return enh.Out, nil + return simpleFloatFunc(vals, enh, func(f float64) float64 { + return math.Floor(f*toNearestInverse+0.5) / toNearestInverse + }), nil } // === Scalar(node parser.ValueTypeVector) Scalar === @@ -823,8 +808,8 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode }), annos } -// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === -func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +// compareOverTime is a helper used by funcMaxOverTime and funcMinOverTime. +func compareOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, compareFn func(float64, float64) bool) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] var annos annotations.Annotations if len(samples.Floats) == 0 { @@ -837,7 +822,7 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return aggrOverTime(vals, enh, func(s Series) float64 { maxVal := s.Floats[0].F for _, f := range s.Floats { - if f.F > maxVal || math.IsNaN(maxVal) { + if compareFn(f.F, maxVal) { maxVal = f.F } } @@ -845,26 +830,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode }), annos } +// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === +func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { + return (cur > maxVal) || math.IsNaN(maxVal) + }) +} + // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - samples := vals[0].(Matrix)[0] - var annos annotations.Annotations - if len(samples.Floats) == 0 { - return enh.Out, nil - } - if len(samples.Histograms) > 0 { - metricName := samples.Metric.Get(labels.MetricName) - annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) - } - return aggrOverTime(vals, enh, func(s Series) float64 { - minVal := s.Floats[0].F - for _, f := range s.Floats { - if f.F < minVal || math.IsNaN(minVal) { - minVal = f.F - } - } - return minVal - }), annos + return compareOverTime(vals, args, enh, func(cur, maxVal float64) bool { + return (cur < maxVal) || math.IsNaN(maxVal) + }) } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -997,7 +974,7 @@ func funcPresentOverTime(vals []parser.Value, _ parser.Expressions, enh *EvalNod }), nil } -func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { +func simpleFloatFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { if el.H == nil { // Process only float samples. 
if !enh.enableDelayedNameRemoval { @@ -1015,114 +992,114 @@ func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float6 // === abs(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcAbs(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Abs), nil + return simpleFloatFunc(vals, enh, math.Abs), nil } // === ceil(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcCeil(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Ceil), nil + return simpleFloatFunc(vals, enh, math.Ceil), nil } // === floor(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcFloor(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Floor), nil + return simpleFloatFunc(vals, enh, math.Floor), nil } // === exp(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcExp(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Exp), nil + return simpleFloatFunc(vals, enh, math.Exp), nil } // === sqrt(Vector VectorNode) (Vector, Annotations) === func funcSqrt(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Sqrt), nil + return simpleFloatFunc(vals, enh, math.Sqrt), nil } // === ln(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcLn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Log), nil + return simpleFloatFunc(vals, enh, math.Log), nil } // === log2(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcLog2(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Log2), nil + return simpleFloatFunc(vals, enh, math.Log2), nil } // === log10(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcLog10(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Log10), nil + return simpleFloatFunc(vals, enh, math.Log10), nil } // === sin(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcSin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Sin), nil + return simpleFloatFunc(vals, enh, math.Sin), nil } // === cos(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcCos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Cos), nil + return simpleFloatFunc(vals, enh, math.Cos), nil } // === tan(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcTan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Tan), nil + return simpleFloatFunc(vals, enh, math.Tan), nil } // === asin(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcAsin(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Asin), nil + return simpleFloatFunc(vals, enh, math.Asin), nil } // === acos(Vector 
parser.ValueTypeVector) (Vector, Annotations) === func funcAcos(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Acos), nil + return simpleFloatFunc(vals, enh, math.Acos), nil } // === atan(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcAtan(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Atan), nil + return simpleFloatFunc(vals, enh, math.Atan), nil } // === sinh(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcSinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Sinh), nil + return simpleFloatFunc(vals, enh, math.Sinh), nil } // === cosh(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcCosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Cosh), nil + return simpleFloatFunc(vals, enh, math.Cosh), nil } // === tanh(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcTanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Tanh), nil + return simpleFloatFunc(vals, enh, math.Tanh), nil } // === asinh(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcAsinh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Asinh), nil + return simpleFloatFunc(vals, enh, math.Asinh), nil } // === acosh(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcAcosh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Acosh), nil + return simpleFloatFunc(vals, enh, math.Acosh), nil } // === atanh(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcAtanh(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, math.Atanh), nil + return simpleFloatFunc(vals, enh, math.Atanh), nil } // === rad(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcRad(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, func(v float64) float64 { + return simpleFloatFunc(vals, enh, func(v float64) float64 { return v * math.Pi / 180 }), nil } // === deg(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcDeg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, func(v float64) float64 { + return simpleFloatFunc(vals, enh, func(v float64) float64 { return v * 180 / math.Pi }), nil } @@ -1134,7 +1111,7 @@ func funcPi(_ []parser.Value, _ parser.Expressions, _ *EvalNodeHelper) (Vector, // === sgn(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcSgn(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - return simpleFunc(vals, enh, func(v float64) float64 { + return simpleFloatFunc(vals, enh, func(v float64) float64 { switch { case v < 0: return -1 @@ -1271,79 +1248,48 @@ func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNo return append(enh.Out, Sample{F: slope*duration + intercept}), nil } +func 
simpleHistogramFunc(vals []parser.Value, enh *EvalNodeHelper, f func(h *histogram.FloatHistogram) float64) Vector { + for _, el := range vals[0].(Vector) { + if el.H != nil { // Process only histogram samples. + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } + enh.Out = append(enh.Out, Sample{ + Metric: el.Metric, + F: f(el.H), + DropName: true, + }) + } + } + return enh.Out +} + // === histogram_count(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcHistogramCount(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - if !enh.enableDelayedNameRemoval { - sample.Metric = sample.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric, - F: sample.H.Count, - DropName: true, - }) - } - return enh.Out, nil + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { + return h.Count + }), nil } // === histogram_sum(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcHistogramSum(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - if !enh.enableDelayedNameRemoval { - sample.Metric = sample.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric, - F: sample.H.Sum, - DropName: true, - }) - } - return enh.Out, nil + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { + return h.Sum + }), nil } // === histogram_avg(Vector parser.ValueTypeVector) (Vector, Annotations) === func funcHistogramAvg(vals []parser.Value, _ parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - inVec := vals[0].(Vector) - - for _, sample := range inVec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - if !enh.enableDelayedNameRemoval { - sample.Metric = sample.Metric.DropMetricName() - } - enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric, - F: sample.H.Sum / sample.H.Count, - DropName: true, - }) - } - return enh.Out, nil + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { + return h.Sum / h.Count + }), nil } func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResult func(float64) float64) (Vector, annotations.Annotations) { - vec := vals[0].(Vector) - for _, sample := range vec { - // Skip non-histogram samples. - if sample.H == nil { - continue - } - mean := sample.H.Sum / sample.H.Count + return simpleHistogramFunc(vals, enh, func(h *histogram.FloatHistogram) float64 { + mean := h.Sum / h.Count var variance, cVariance float64 - it := sample.H.AllBucketIterator() + it := h.AllBucketIterator() for it.Next() { bucket := it.At() if bucket.Count == 0 { @@ -1351,7 +1297,7 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul } var val float64 switch { - case sample.H.UsesCustomBuckets(): + case h.UsesCustomBuckets(): // Use arithmetic mean in case of custom buckets. 
val = (bucket.Upper + bucket.Lower) / 2.0 case bucket.Lower <= 0 && bucket.Upper >= 0: @@ -1368,20 +1314,12 @@ func histogramVariance(vals []parser.Value, enh *EvalNodeHelper, varianceToResul variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) } variance += cVariance - variance /= sample.H.Count - if !enh.enableDelayedNameRemoval { - sample.Metric = sample.Metric.DropMetricName() - } + variance /= h.Count if varianceToResult != nil { variance = varianceToResult(variance) } - enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric, - F: variance, - DropName: true, - }) - } - return enh.Out, nil + return variance + }), nil } // === histogram_stddev(Vector parser.ValueTypeVector) (Vector, Annotations) === From 8b0d33e5b20475318a1a72857ed8a9947b13e1aa Mon Sep 17 00:00:00 2001 From: Neeraj Gartia <80708727+NeerajGartia21@users.noreply.github.com> Date: Sun, 11 May 2025 19:10:31 +0530 Subject: [PATCH 31/39] promql: support variable scalar parameter in aggregations in range queries (#16404) This fixes the regression introduced in https://github.com/prometheus/prometheus/issues/15971 while preserving the performance improvements. Signed-off-by: Neeraj Gartia --- promql/engine.go | 131 +++++++++++++------- promql/promqltest/testdata/aggregators.test | 20 ++- promql/promqltest/testdata/limit.test | 22 +++- promql/value.go | 66 ++++++++++ 4 files changed, 186 insertions(+), 53 deletions(-) diff --git a/promql/engine.go b/promql/engine.go index a2738fdc1e..b5fec3153e 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -1377,7 +1377,7 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label return mat, warnings } -func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, params *fParams) (Matrix, annotations.Annotations) { // Keep a copy of the original point slice so that it can be returned to the pool. origMatrix := slices.Clone(inputMatrix) defer func() { @@ -1387,7 +1387,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate } }() - var warnings annotations.Annotations + var annos annotations.Annotations enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval} tempNumSamples := ev.currentSamples @@ -1417,46 +1417,43 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate } groups := make([]groupedAggregation, groupCount) - var k int64 - var ratio float64 var seriess map[uint64]Series + switch aggExpr.Op { case parser.TOPK, parser.BOTTOMK, parser.LIMITK: - if !convertibleToInt64(param) { - ev.errorf("Scalar value %v overflows int64", param) + // Return early if all k values are less than one. + if params.Max() < 1 { + return nil, annos } - k = int64(param) - if k > int64(len(inputMatrix)) { - k = int64(len(inputMatrix)) - } - if k < 1 { - return nil, warnings - } - seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + seriess = make(map[uint64]Series, len(inputMatrix)) + case parser.LIMIT_RATIO: - if math.IsNaN(param) { - ev.errorf("Ratio value %v is NaN", param) + // Return early if all r values are zero. 
+ if params.Max() == 0 && params.Min() == 0 { + return nil, annos } - switch { - case param == 0: - return nil, warnings - case param < -1.0: - ratio = -1.0 - warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) - case param > 1.0: - ratio = 1.0 - warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange())) - default: - ratio = param + if params.Max() > 1.0 { + annos.Add(annotations.NewInvalidRatioWarning(params.Max(), 1.0, aggExpr.Param.PositionRange())) } - seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash. + if params.Min() < -1.0 { + annos.Add(annotations.NewInvalidRatioWarning(params.Min(), -1.0, aggExpr.Param.PositionRange())) + } + seriess = make(map[uint64]Series, len(inputMatrix)) + case parser.QUANTILE: - if math.IsNaN(param) || param < 0 || param > 1 { - warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange())) + if params.HasAnyNaN() { + annos.Add(annotations.NewInvalidQuantileWarning(math.NaN(), aggExpr.Param.PositionRange())) + } + if params.Max() > 1 { + annos.Add(annotations.NewInvalidQuantileWarning(params.Max(), aggExpr.Param.PositionRange())) + } + if params.Min() < 0 { + annos.Add(annotations.NewInvalidQuantileWarning(params.Min(), aggExpr.Param.PositionRange())) } } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + fParam := params.Next() if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } @@ -1468,17 +1465,17 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate var ws annotations.Annotations switch aggExpr.Op { case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO: - result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess) + result, ws = ev.aggregationK(aggExpr, fParam, inputMatrix, seriesToResult, groups, enh, seriess) // If this could be an instant query, shortcut so as not to change sort order. - if ev.endTimestamp == ev.startTimestamp { - warnings.Merge(ws) - return result, warnings + if ev.startTimestamp == ev.endTimestamp { + annos.Merge(ws) + return result, annos } default: - ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh) + ws = ev.aggregation(aggExpr, fParam, inputMatrix, result, seriesToResult, groups, enh) } - warnings.Merge(ws) + annos.Merge(ws) if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -1503,7 +1500,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate } result = result[:dst] } - return result, warnings + return result, annos } // evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset. @@ -1681,18 +1678,14 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, var warnings annotations.Annotations originalNumSamples := ev.currentSamples // param is the number k for topk/bottomk, or q for quantile. - var fParam float64 - if param != nil { - val, ws := ev.eval(ctx, param) - warnings.Merge(ws) - fParam = val.(Matrix)[0].Floats[0].F - } + fp, ws := newFParams(ctx, ev, param) + warnings.Merge(ws) // Now fetch the data to be aggregated. 
val, ws := ev.eval(ctx, e.Expr) warnings.Merge(ws) inputMatrix := val.(Matrix) - result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam) + result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fp) warnings.Merge(ws) ev.currentSamples = originalNumSamples + result.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -3269,7 +3262,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // seriesToResult maps inputMatrix indexes to groups indexes. // For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. // For a range query, aggregates output in the seriess map. -func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, fParam float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { op := e.Op var s Sample var annos annotations.Annotations @@ -3278,6 +3271,14 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, i for i := range groups { groups[i].seen = false } + // advanceRemainingSeries discards any values at the current timestamp `ts` + // for the remaining input series. In range queries, if these values are not + // consumed now, they will no longer be accessible in the next evaluation step. + advanceRemainingSeries := func(ts int64, startIdx int) { + for i := startIdx; i < len(inputMatrix); i++ { + _, _, _ = ev.nextValues(ts, &inputMatrix[i]) + } + } seriesLoop: for si := range inputMatrix { @@ -3287,6 +3288,42 @@ seriesLoop: } s = Sample{Metric: inputMatrix[si].Metric, F: f, H: h, DropName: inputMatrix[si].DropName} + var k int64 + var r float64 + switch op { + case parser.TOPK, parser.BOTTOMK, parser.LIMITK: + if !convertibleToInt64(fParam) { + ev.errorf("Scalar value %v overflows int64", fParam) + } + k = int64(fParam) + if k > int64(len(inputMatrix)) { + k = int64(len(inputMatrix)) + } + if k < 1 { + if enh.Ts != ev.endTimestamp { + advanceRemainingSeries(enh.Ts, si+1) + } + return nil, annos + } + case parser.LIMIT_RATIO: + if math.IsNaN(fParam) { + ev.errorf("Ratio value %v is NaN", fParam) + } + switch { + case fParam == 0: + if enh.Ts != ev.endTimestamp { + advanceRemainingSeries(enh.Ts, si+1) + } + return nil, annos + case fParam < -1.0: + r = -1.0 + case fParam > 1.0: + r = 1.0 + default: + r = fParam + } + } + group := &groups[seriesToResult[si]] // Initialize this group if it's the first time we've seen it. if !group.seen { @@ -3377,6 +3414,10 @@ seriesLoop: group.groupAggrComplete = true groupsRemaining-- if groupsRemaining == 0 { + // Process other values in the series before breaking the loop in case of range query. 
+ if enh.Ts != ev.endTimestamp { + advanceRemainingSeries(enh.Ts, si+1) + } break seriesLoop } } diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index 1e3ab79a35..b8ebdc55c6 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -274,7 +274,7 @@ load 5m http_requests{job="app-server", instance="1", group="canary"} 0+80x10 http_requests_histogram{job="app-server", instance="2", group="canary"} {{schema:0 sum:10 count:10}}x11 http_requests_histogram{job="api-server", instance="3", group="production"} {{schema:0 sum:20 count:20}}x11 - foo 3+0x10 + foo 1+1x9 3 eval_ordered instant at 50m topk(3, http_requests) http_requests{group="canary", instance="1", job="app-server"} 800 @@ -340,6 +340,13 @@ eval_ordered instant at 50m topk(scalar(foo), http_requests) http_requests{group="canary", instance="0", job="app-server"} 700 http_requests{group="production", instance="1", job="app-server"} 600 +# Bug #15971. +eval range from 0m to 50m step 5m count(topk(scalar(foo), http_requests)) + {} 1 2 3 4 5 6 7 8 9 9 3 + +eval range from 0m to 50m step 5m count(bottomk(scalar(foo), http_requests)) + {} 1 2 3 4 5 6 7 8 9 9 3 + # Tests for histogram: should ignore histograms. eval_info instant at 50m topk(100, http_requests_histogram) #empty @@ -447,7 +454,7 @@ load 10s data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} - foo .8 + foo 0 1 0 1 0 1 0.8 eval instant at 1m quantile without(point)(0.8, data) {test="two samples"} 0.8 @@ -475,11 +482,18 @@ eval instant at 1m quantile without(point)((scalar(foo)), data) {test="three samples"} 1.6 {test="uneven samples"} 2.8 -eval_warn instant at 1m quantile without(point)(NaN, data) +eval instant at 1m quantile without(point)(NaN, data) + expect warn msg: PromQL warning: quantile value should be between 0 and 1, got NaN {test="two samples"} NaN {test="three samples"} NaN {test="uneven samples"} NaN +# Bug #15971. +eval range from 0m to 1m step 10s quantile without(point) (scalar(foo), data) + {test="two samples"} 0 1 0 1 0 1 0.8 + {test="three samples"} 0 2 0 2 0 2 1.6 + {test="uneven samples"} 0 4 0 4 0 4 2.8 + # Tests for group. clear diff --git a/promql/promqltest/testdata/limit.test b/promql/promqltest/testdata/limit.test index e6dd007af4..484760cc85 100644 --- a/promql/promqltest/testdata/limit.test +++ b/promql/promqltest/testdata/limit.test @@ -11,6 +11,8 @@ load 5m http_requests{job="api-server", instance="3", group="canary"} 0+60x10 http_requests{job="api-server", instance="histogram_1", group="canary"} {{schema:0 sum:10 count:10}}x11 http_requests{job="api-server", instance="histogram_2", group="canary"} {{schema:0 sum:20 count:20}}x11 + foo 1+1x10 + bar 0 1 0 -1 0 1 0 -1 0 1 0 eval instant at 50m count(limitk by (group) (0, http_requests)) # empty @@ -69,6 +71,10 @@ eval instant at 50m count(limitk(1000, http_requests{instance=~"histogram_[0-9]" eval range from 0 to 50m step 5m count(limitk(1000, http_requests{instance=~"histogram_[0-9]"})) {} 2+0x10 +# Bug #15971. +eval range from 0m to 50m step 5m count(limitk(scalar(foo), http_requests)) + {} 1 2 3 4 5 6 7 8 8 8 8 + # limit_ratio eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests)) # empty @@ -105,11 +111,13 @@ eval range from 0 to 50m step 5m count(limit_ratio(-1.0, http_requests) and http {} 8+0x10 # Capped to 1.0 -> all samples. 
-eval_warn range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests) +eval range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests) + expect warn msg: PromQL warning: ratio value should be between -1 and 1, got 1.1, capping to 1 {} 8+0x10 # Capped to -1.0 -> all samples. -eval_warn range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests) +eval range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests) + expect warn msg: PromQL warning: ratio value should be between -1 and 1, got -1.1, capping to -1 {} 8+0x10 # Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other. @@ -137,12 +145,12 @@ eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) or limit_ eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit_ratio(-0.2, http_requests)) # empty -# Complement below for [some_ratio, 1.0 - some_ratio], some_ratio derived from time(), +# Complement below for [some_ratio, - (1.0 - some_ratio)], some_ratio derived from time(), # using a small prime number to avoid rounded ratio values, and a small set of them. -eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio(1.0 - (time() % 17/17), http_requests)) +eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio( - (1.0 - (time() % 17/17)), http_requests)) {} 8+0x10 -eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio(1.0 - (time() % 17/17), http_requests)) +eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio( - (1.0 - (time() % 17/17)), http_requests)) # empty # Poor man's normality check: ok (loaded samples follow a nice linearity over labels and time). @@ -156,3 +164,7 @@ eval instant at 50m limit_ratio(1, http_requests{instance="histogram_1"}) eval range from 0 to 50m step 5m limit_ratio(1, http_requests{instance="histogram_1"}) {__name__="http_requests", group="canary", instance="histogram_1", job="api-server"} {{count:10 sum:10}}x10 + +# Bug #15971. +eval range from 0m to 50m step 5m count(limit_ratio(scalar(bar), http_requests)) + {} _ 8 _ 8 _ 8 _ 8 _ 8 _ diff --git a/promql/value.go b/promql/value.go index f19c0b5b58..dc59b9e9cc 100644 --- a/promql/value.go +++ b/promql/value.go @@ -14,6 +14,7 @@ package promql import ( + "context" "encoding/json" "errors" "fmt" @@ -533,3 +534,68 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { func (ssi *storageSeriesIterator) Err() error { return nil } + +type fParams struct { + series Series + constValue float64 + isConstant bool + minValue float64 + maxValue float64 + hasAnyNaN bool +} + +// newFParams evaluates the expression and returns an fParams object, +// which holds the parameter values (constant or series) along with min, max, and NaN info. 
+func newFParams(ctx context.Context, ev *evaluator, expr parser.Expr) (*fParams, annotations.Annotations) { + if expr == nil { + return &fParams{}, nil + } + var constParam bool + if _, ok := expr.(*parser.NumberLiteral); ok { + constParam = true + } + val, ws := ev.eval(ctx, expr) + mat, ok := val.(Matrix) + if !ok || len(mat) == 0 { + return &fParams{}, ws + } + fp := &fParams{ + series: mat[0], + isConstant: constParam, + minValue: math.MaxFloat64, + maxValue: -math.MaxFloat64, + } + + if constParam { + fp.constValue = fp.series.Floats[0].F + fp.minValue, fp.maxValue = fp.constValue, fp.constValue + fp.hasAnyNaN = math.IsNaN(fp.constValue) + return fp, ws + } + + for _, v := range fp.series.Floats { + fp.maxValue = math.Max(fp.maxValue, v.F) + fp.minValue = math.Min(fp.minValue, v.F) + if math.IsNaN(v.F) { + fp.hasAnyNaN = true + } + } + return fp, ws +} + +func (fp *fParams) Max() float64 { return fp.maxValue } +func (fp *fParams) Min() float64 { return fp.minValue } +func (fp *fParams) HasAnyNaN() bool { return fp.hasAnyNaN } + +// Next returns the next value from the series or the constant value, and advances the series if applicable. +func (fp *fParams) Next() float64 { + if fp.isConstant { + return fp.constValue + } + if len(fp.series.Floats) > 0 { + val := fp.series.Floats[0].F + fp.series.Floats = fp.series.Floats[1:] + return val + } + return 0 +} From 5c06804df88d1e92e04e5790295fe0d3250eec16 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 12 May 2025 10:39:58 +0200 Subject: [PATCH 32/39] Optimize memoization and search debouncing on /targets page (#16589) Moving the debouncing of the search field to the parent component and then memoizing the ScrapePoolsList component prevents a lot of superfluous re-renders of the entire scrape pools list that previously got triggered immediately when you typed in the search box or even just collapsed a pool. (While the computation of what data to show was already memoized in the ScrapePoolList component, the component itself still had to re-render a lot with the same data.) Discovered this problem + verified fix using react-scan. 
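As an illustration of the pattern described above, a minimal sketch of debouncing in the parent while memoizing the child might look roughly like this (component and prop names here are placeholders for illustration, not the actual Prometheus UI code):

    import { FC, memo, useState } from "react";
    import { useDebouncedValue } from "@mantine/hooks";

    type ListProps = { searchFilter: string };

    // Memoized child: re-renders only when searchFilter changes by value,
    // not on every keystroke or unrelated parent state update.
    const FilteredList: FC<ListProps> = memo(({ searchFilter }) => {
      // ...expensive filtering and rendering based on searchFilter...
      return <div>Filtering by: {searchFilter}</div>;
    });

    const ParentPage: FC = () => {
      const [search, setSearch] = useState("");
      // Debounce in the parent so the memoized child only sees settled values.
      const [debouncedSearch] = useDebouncedValue(search.trim(), 250);

      return (
        <>
          <input
            value={search}
            onChange={(e) => setSearch(e.currentTarget.value)}
          />
          <FilteredList searchFilter={debouncedSearch} />
        </>
      );
    };

    export default ParentPage;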
Signed-off-by: Julius Volz --- .../src/pages/targets/ScrapePoolsList.tsx | 519 +++++++++--------- .../src/pages/targets/TargetsPage.tsx | 12 +- 2 files changed, 270 insertions(+), 261 deletions(-) diff --git a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx index c444747724..7423fd8a72 100644 --- a/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx +++ b/web/ui/mantine-ui/src/pages/targets/ScrapePoolsList.tsx @@ -19,7 +19,7 @@ import { } from "@tabler/icons-react"; import { useSuspenseAPIQuery } from "../../api/api"; import { Target, TargetsResult } from "../../api/responseTypes/targets"; -import React, { FC, useMemo } from "react"; +import React, { FC, memo, useMemo } from "react"; import { humanizeDurationRelative, humanizeDuration, @@ -37,7 +37,6 @@ import CustomInfiniteScroll from "../../components/CustomInfiniteScroll"; import badgeClasses from "../../Badge.module.css"; import panelClasses from "../../Panel.module.css"; import TargetLabels from "./TargetLabels"; -import { useDebouncedValue } from "@mantine/hooks"; import { targetPoolDisplayLimit } from "./TargetsPage"; import { badgeIconStyle } from "../../styles"; @@ -145,278 +144,280 @@ type ScrapePoolListProp = { searchFilter: string; }; -const ScrapePoolList: FC = ({ - poolNames, - selectedPool, - healthFilter, - searchFilter, -}) => { - // Based on the selected pool (if any), load the list of targets. - const { - data: { - data: { activeTargets }, - }, - } = useSuspenseAPIQuery({ - path: `/targets`, - params: { - state: "active", - scrapePool: selectedPool === null ? "" : selectedPool, - }, - }); +const ScrapePoolList: FC = memo( + ({ poolNames, selectedPool, healthFilter, searchFilter }) => { + // Based on the selected pool (if any), load the list of targets. + const { + data: { + data: { activeTargets }, + }, + } = useSuspenseAPIQuery({ + path: `/targets`, + params: { + state: "active", + scrapePool: selectedPool === null ? "" : selectedPool, + }, + }); - const dispatch = useAppDispatch(); - const [showEmptyPools, setShowEmptyPools] = useLocalStorage({ - key: "targetsPage.showEmptyPools", - defaultValue: false, - }); + const dispatch = useAppDispatch(); + const [showEmptyPools, setShowEmptyPools] = useLocalStorage({ + key: "targetsPage.showEmptyPools", + defaultValue: false, + }); - const { collapsedPools, showLimitAlert } = useAppSelector( - (state) => state.targetsPage - ); + const { collapsedPools, showLimitAlert } = useAppSelector( + (state) => state.targetsPage + ); - const [debouncedSearch] = useDebouncedValue(searchFilter.trim(), 250); + const allPools = useMemo( + () => + buildPoolsData( + selectedPool ? [selectedPool] : poolNames, + activeTargets, + searchFilter, + healthFilter + ), + [selectedPool, poolNames, activeTargets, searchFilter, healthFilter] + ); - const allPools = useMemo( - () => - buildPoolsData( - selectedPool ? [selectedPool] : poolNames, - activeTargets, - debouncedSearch, - healthFilter - ), - [selectedPool, poolNames, activeTargets, debouncedSearch, healthFilter] - ); + const allPoolNames = Object.keys(allPools); + const shownPoolNames = showEmptyPools + ? allPoolNames + : allPoolNames.filter((pn) => allPools[pn].targets.length !== 0); - const allPoolNames = Object.keys(allPools); - const shownPoolNames = showEmptyPools - ? allPoolNames - : allPoolNames.filter((pn) => allPools[pn].targets.length !== 0); - - return ( - - {allPoolNames.length === 0 ? ( - }> - No scrape pools found. 
- - ) : ( - !showEmptyPools && - allPoolNames.length !== shownPoolNames.length && ( - } - > - Hiding {allPoolNames.length - shownPoolNames.length} empty pools due - to filters or no targets. - setShowEmptyPools(true)}> - Show empty pools - + return ( + + {allPoolNames.length === 0 ? ( + }> + No scrape pools found. - ) - )} - {showLimitAlert && ( - } - withCloseButton - onClose={() => dispatch(setShowLimitAlert(false))} - > - There are more than {targetPoolDisplayLimit} scrape pools. Showing - only the first one. Use the dropdown to select a different pool. - - )} - !collapsedPools.includes(p))} - onChange={(value) => - dispatch( - setCollapsedPools(allPoolNames.filter((p) => !value.includes(p))) - ) - } - > - {shownPoolNames.map((poolName) => { - const pool = allPools[poolName]; - return ( - } > - - - {poolName} - - - {pool.upCount} / {pool.count} up - - + Hiding {allPoolNames.length - shownPoolNames.length} empty pools + due to filters or no targets. + setShowEmptyPools(true)}> + Show empty pools + + + ) + )} + {showLimitAlert && ( + } + withCloseButton + onClose={() => dispatch(setShowLimitAlert(false))} + > + There are more than {targetPoolDisplayLimit} scrape pools. Showing + only the first one. Use the dropdown to select a different pool. + + )} + !collapsedPools.includes(p))} + onChange={(value) => + dispatch( + setCollapsedPools(allPoolNames.filter((p) => !value.includes(p))) + ) + } + > + {shownPoolNames.map((poolName) => { + const pool = allPools[poolName]; + return ( + + + + {poolName} + + + {pool.upCount} / {pool.count} up + + + - - - - {pool.count === 0 ? ( - }> - No active targets in this scrape pool. - setShowEmptyPools(false)} + + + {pool.count === 0 ? ( + }> + No active targets in this scrape pool. + setShowEmptyPools(false)} + > + Hide empty pools + + + ) : pool.targets.length === 0 ? ( + } > - Hide empty pools - - - ) : pool.targets.length === 0 ? ( - }> - No targets in this pool match your filter criteria (omitted{" "} - {pool.count} filtered targets). - setShowEmptyPools(false)} - > - Hide empty pools - - - ) : ( - ( - - - - Endpoint - Labels - Last scrape - State - - - - {items.map((target, i) => ( - // TODO: Find a stable and definitely unique key. - - - - - + No targets in this pool match your filter criteria + (omitted {pool.count} filtered targets). + setShowEmptyPools(false)} + > + Hide empty pools + + + ) : ( + ( +
+ + + Endpoint + Labels + Last scrape + State + + + + {items.map((target, i) => ( + // TODO: Find a stable and definitely unique key. + + + + + - - - - - - - - } + + + + + + - {humanizeDurationRelative( - target.lastScrape, - now() - )} - - + + } + > + {humanizeDurationRelative( + target.lastScrape, + now() + )} + + - - - } + - {humanizeDuration( - target.lastScrapeDuration * 1000 - )} - - - - - - - {target.health} - - - - {target.lastError && ( - - - } + + } + > + {humanizeDuration( + target.lastScrapeDuration * 1000 + )} + + + + + + - Error scraping target:{" "} - {target.lastError} - + {target.health} + - )} - - ))} - -
- )} - /> - )} -
-
- ); - })} -
-
- ); -}; + {target.lastError && ( + + + } + > + Error scraping target:{" "} + {target.lastError} + + + + )} + + ))} + + + )} + /> + )} + + + ); + })} + + + ); + } +); export default ScrapePoolList; diff --git a/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx b/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx index 399d1a458d..75d7bd2f4e 100644 --- a/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx +++ b/web/ui/mantine-ui/src/pages/targets/TargetsPage.tsx @@ -30,9 +30,16 @@ import ScrapePoolList from "./ScrapePoolsList"; import { useSuspenseAPIQuery } from "../../api/api"; import { ScrapePoolsResult } from "../../api/responseTypes/scrapePools"; import { expandIconStyle, inputIconStyle } from "../../styles"; +import { useDebouncedValue } from "@mantine/hooks"; export const targetPoolDisplayLimit = 20; +// Should be defined as a constant here instead of inline as a value +// to avoid unnecessary re-renders. Otherwise the empty array has +// a different reference on each render and causes subsequent memoized +// computations to re-run as long as no state filter is selected. +const emptyHealthFilter: string[] = []; + export default function TargetsPage() { // Load the list of all available scrape pools. const { @@ -48,12 +55,13 @@ export default function TargetsPage() { const [scrapePool, setScrapePool] = useQueryParam("pool", StringParam); const [healthFilter, setHealthFilter] = useQueryParam( "health", - withDefault(ArrayParam, []) + withDefault(ArrayParam, emptyHealthFilter) ); const [searchFilter, setSearchFilter] = useQueryParam( "search", withDefault(StringParam, "") ); + const [debouncedSearch] = useDebouncedValue(searchFilter.trim(), 250); const { collapsedPools, showLimitAlert } = useAppSelector( (state) => state.targetsPage @@ -147,7 +155,7 @@ export default function TargetsPage() { poolNames={scrapePools} selectedPool={(limited && scrapePools[0]) || scrapePool || null} healthFilter={healthFilter as string[]} - searchFilter={searchFilter} + searchFilter={debouncedSearch} /> From dbf5d01a62249eddcd202303069f6cf7dd3c4a73 Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 12 May 2025 12:17:18 +0200 Subject: [PATCH 33/39] Fix full-page re-rendering when opening status nav menu (#16590) When opening the status pages menu while already viewing one of the status pages, the whole page would be re-rendered because the menu target's default action of following the current page's URL was not prevented. Also, we don't need to use a NavLink component for the menu target when we are not viewing a status page, because then the component won't need to be highlighted anyways. Discovered + fixed with the help of react-scan. Signed-off-by: Julius Volz --- web/ui/mantine-ui/src/App.tsx | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/mantine-ui/src/App.tsx b/web/ui/mantine-ui/src/App.tsx index 2599c9f5aa..f3f02b41dc 100644 --- a/web/ui/mantine-ui/src/App.tsx +++ b/web/ui/mantine-ui/src/App.tsx @@ -224,6 +224,7 @@ function App() { leftSection={p.icon} rightSection={} px={navLinkXPadding} + onClick={(e) => e.preventDefault()} > Status {p.title} @@ -236,14 +237,9 @@ function App() { element={