diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index edb0cf5658..63e7302929 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -673,7 +673,7 @@ func main() { // Set Go runtime parameters before we get too far into initialization. updateGoGC(cfgFile, logger) if cfg.maxprocsEnable { - l := func(format string, a ...interface{}) { + l := func(format string, a ...any) { logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs") } if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index e4262f1b3b..e37e012e0c 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -202,7 +202,6 @@ func TestSendAlerts(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(strconv.Itoa(i), func(t *testing.T) { senderFunc := senderFunc(func(alerts ...*notifier.Alert) { require.NotEmpty(t, tc.in, "sender called with 0 alert") diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go index 94eec27e79..66bfe9b60a 100644 --- a/cmd/prometheus/main_unix_test.go +++ b/cmd/prometheus/main_unix_test.go @@ -53,7 +53,7 @@ func TestStartupInterrupt(t *testing.T) { url := "http://localhost" + port + "/graph" Loop: - for x := 0; x < 10; x++ { + for range 10 { // error=nil means prometheus has started, so we can send the interrupt // signal and wait for the graceful shutdown. if _, err := http.Get(url); err == nil { diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go index 7c073b59d0..645ac31145 100644 --- a/cmd/prometheus/query_log_test.go +++ b/cmd/prometheus/query_log_test.go @@ -70,7 +70,7 @@ func (p *queryLogTest) skip(t *testing.T) { // waitForPrometheus waits for Prometheus to be ready. 
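Reviewer note: the hunks above (and most of this patch) are mechanical Go modernizations. A minimal sketch, assuming Go 1.22 or newer, of the language features they rely on; the values here are illustrative only:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	var v any = "hello" // Go 1.18: any is a pure alias for interface{}

	for i := range 3 { // Go 1.22: replaces for i := 0; i < 3; i++
		fmt.Println(i, v)
	}

	fmt.Println(min(1500, 1000)) // Go 1.21 builtin: replaces the if/else clamp in tsdb.go

	xs := []int{3, 1, 2}
	slices.Sort(xs) // Go 1.21: replaces sort.Slice with a hand-written less func
	fmt.Println(xs)
}
```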
func (p *queryLogTest) waitForPrometheus() error { var err error - for x := 0; x < 20; x++ { + for range 20 { var r *http.Response if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK { break diff --git a/cmd/promtool/analyze.go b/cmd/promtool/analyze.go index 26e6f2188c..aea72a193b 100644 --- a/cmd/promtool/analyze.go +++ b/cmd/promtool/analyze.go @@ -207,7 +207,7 @@ func calcClassicBucketStatistics(matrix model.Matrix) (*statistics, error) { sortMatrix(matrix) totalPop := 0 - for timeIdx := 0; timeIdx < numSamples; timeIdx++ { + for timeIdx := range numSamples { curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx) if err != nil { return stats, err diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index f512728ac9..da14c5d809 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -155,10 +155,7 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u var wg sync.WaitGroup lbls := lbls for len(lbls) > 0 { - l := 1000 - if len(lbls) < 1000 { - l = len(lbls) - } + l := min(len(lbls), 1000) batch := lbls[:l] lbls = lbls[l:] @@ -200,7 +197,7 @@ func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount in } total := uint64(0) - for i := 0; i < scrapeCount; i++ { + for range scrapeCount { app := b.storage.Appender(context.TODO()) ts += timeDelta diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index dbef349ab5..3fbed5546c 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -22,6 +22,7 @@ import ( "math" "os" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -278,9 +279,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde for k := range alertEvalTimesMap { alertEvalTimes = append(alertEvalTimes, k) } - sort.Slice(alertEvalTimes, func(i, j int) bool { - return alertEvalTimes[i] < alertEvalTimes[j] - }) + slices.Sort(alertEvalTimes) // Current index in alertEvalTimes what we are looking at. curr := 0 diff --git a/config/config.go b/config/config.go index 058b4bf881..192c216290 100644 --- a/config/config.go +++ b/config/config.go @@ -367,7 +367,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { // UnmarshalYAML implements the yaml.Unmarshaler interface. // NOTE: This method should not be used outside of this package. Use Load or LoadFile instead. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultConfig // We want to set c to the defaults and then overwrite it with the input. // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML @@ -594,7 +594,7 @@ func (c *GlobalConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error { // Create a clean global config as the previous one was already populated // by the default due to the YAML parser behavior for empty blocks. 
gc := &GlobalConfig{} @@ -630,11 +630,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return errors.New("global scrape timeout greater than scrape interval") } if gc.ScrapeTimeout == 0 { - if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval { - gc.ScrapeTimeout = gc.ScrapeInterval - } else { - gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout - } + gc.ScrapeTimeout = min(DefaultGlobalConfig.ScrapeTimeout, gc.ScrapeInterval) } if gc.EvaluationInterval == 0 { gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval @@ -790,7 +786,7 @@ func (c *ScrapeConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultScrapeConfig if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil { return err @@ -841,11 +837,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) } if c.ScrapeTimeout == 0 { - if globalConfig.ScrapeTimeout > c.ScrapeInterval { - c.ScrapeTimeout = c.ScrapeInterval - } else { - c.ScrapeTimeout = globalConfig.ScrapeTimeout - } + c.ScrapeTimeout = min(globalConfig.ScrapeTimeout, c.ScrapeInterval) } if c.BodySizeLimit == 0 { c.BodySizeLimit = globalConfig.BodySizeLimit @@ -970,7 +962,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { } // MarshalYAML implements the yaml.Marshaler interface. -func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { +func (c *ScrapeConfig) MarshalYAML() (any, error) { return discovery.MarshalYAMLWithInlineConfigs(c) } @@ -1024,7 +1016,7 @@ type TSDBConfig struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *TSDBConfig) UnmarshalYAML(unmarshal func(any) error) error { *t = TSDBConfig{} type plain TSDBConfig if err := unmarshal((*plain)(t)); err != nil { @@ -1046,7 +1038,7 @@ const ( ) // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (t *TracingClientType) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *TracingClientType) UnmarshalYAML(unmarshal func(any) error) error { *t = TracingClientType("") type plain TracingClientType if err := unmarshal((*plain)(t)); err != nil { @@ -1080,7 +1072,7 @@ func (t *TracingConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (t *TracingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (t *TracingConfig) UnmarshalYAML(unmarshal func(any) error) error { *t = TracingConfig{ ClientType: TracingClientGRPC, } @@ -1140,7 +1132,7 @@ func (c *AlertingConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *AlertingConfig) UnmarshalYAML(unmarshal func(any) error) error { // Create a clean global config as the previous one was already populated // by the default due to the YAML parser behavior for empty blocks. *c = AlertingConfig{} @@ -1175,7 +1167,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig { type AlertmanagerAPIVersion string // UnmarshalYAML implements the yaml.Unmarshaler interface. 
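Reviewer note: every UnmarshalYAML touched in this patch follows the same defaults-then-decode shape; only the parameter type is respelled. A minimal sketch of the pattern, with SDConfig and Port as hypothetical stand-ins:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type SDConfig struct {
	Port int `yaml:"port"`
}

var DefaultSDConfig = SDConfig{Port: 80}

func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	*c = DefaultSDConfig          // start from the defaults ...
	type plain SDConfig           // ... then decode into a method-free alias,
	return unmarshal((*plain)(c)) // so UnmarshalYAML is not re-entered
}

func main() {
	var c SDConfig
	if err := yaml.Unmarshal([]byte("{}"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Port) // 80: the default survives an empty document
}
```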
-func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(any) error) error { *v = AlertmanagerAPIVersion("") type plain AlertmanagerAPIVersion if err := unmarshal((*plain)(v)); err != nil { @@ -1234,7 +1226,7 @@ func (c *AlertmanagerConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultAlertmanagerConfig if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil { return err @@ -1291,7 +1283,7 @@ func (c *AlertmanagerConfig) Validate(nameValidationScheme model.ValidationSchem } // MarshalYAML implements the yaml.Marshaler interface. -func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) { +func (c *AlertmanagerConfig) MarshalYAML() (any, error) { return discovery.MarshalYAMLWithInlineConfigs(c) } @@ -1395,7 +1387,7 @@ func (c *RemoteWriteConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultRemoteWriteConfig type plain RemoteWriteConfig if err := unmarshal((*plain)(c)); err != nil { @@ -1560,7 +1552,7 @@ func (c *RemoteReadConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultRemoteReadConfig type plain RemoteReadConfig if err := unmarshal((*plain)(c)); err != nil { @@ -1620,7 +1612,7 @@ type OTLPConfig struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *OTLPConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultOTLPConfig type plain OTLPConfig if err := unmarshal((*plain)(c)); err != nil { diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index 431bbcd811..539cd84c4f 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -116,7 +116,7 @@ func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery } // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config. -func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultEC2SDConfig type plain EC2SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 62c64afcfd..5c356c8c45 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -98,7 +98,7 @@ func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (dis } // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config. 
-func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultLightsailSDConfig type plain LightsailSDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 670afb5a4e..bed4861787 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -138,7 +138,7 @@ func validateAuthParam(param, name string) error { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 4c8de6e291..74b5d0724e 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -142,7 +142,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index ea896ce31b..a450cf216f 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -437,8 +437,8 @@ func TestGetDatacenterShouldReturnError(t *testing.T) { } func TestUnmarshalConfig(t *testing.T) { - unmarshal := func(d []byte) func(interface{}) error { - return func(o interface{}) error { + unmarshal := func(d []byte) func(any) error { + return func(o any) error { return yaml.Unmarshal(d, o) } } diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go index d0ececd9e9..5c9795440d 100644 --- a/discovery/digitalocean/digitalocean.go +++ b/discovery/digitalocean/digitalocean.go @@ -93,7 +93,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/discovery.go b/discovery/discovery.go index 2efffd0e19..2157b820b9 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -108,7 +108,7 @@ func (c *Configs) SetDirectory(dir string) { } // UnmarshalYAML implements yaml.Unmarshaler. -func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Configs) UnmarshalYAML(unmarshal func(any) error) error { cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -123,7 +123,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements yaml.Marshaler. -func (c Configs) MarshalYAML() (interface{}, error) { +func (c Configs) MarshalYAML() (any, error) { cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go index 405dba44f7..24af8f65d9 100644 --- a/discovery/dns/dns.go +++ b/discovery/dns/dns.go @@ -82,7 +82,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/dns/dns_test.go b/discovery/dns/dns_test.go index 2b7100c7e7..a1c161789c 100644 --- a/discovery/dns/dns_test.go +++ b/discovery/dns/dns_test.go @@ -251,7 +251,6 @@ func TestDNS(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -282,8 +281,8 @@ func TestSDConfigUnmarshalYAML(t *testing.T) { return d } - unmarshal := func(d []byte) func(interface{}) error { - return func(o interface{}) error { + unmarshal := func(d []byte) func(any) error { + return func(o any) error { return yaml.Unmarshal(d, o) } } diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 459b608e96..11e83359cf 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -97,7 +97,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/file/file.go b/discovery/file/file.go index beea03222b..dfcc904fbe 100644 --- a/discovery/file/file.go +++ b/discovery/file/file.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "log/slog" + "maps" "os" "path/filepath" "strings" @@ -78,7 +79,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) @@ -120,9 +121,7 @@ func (t *TimestampCollector) Collect(ch chan<- prometheus.Metric) { t.lock.RLock() for fileSD := range t.discoverers { fileSD.lock.RLock() - for filename, timestamp := range fileSD.timestamps { - uniqueFiles[filename] = timestamp - } + maps.Copy(uniqueFiles, fileSD.timestamps) fileSD.lock.RUnlock() } t.lock.RUnlock() diff --git a/discovery/file/file_test.go b/discovery/file/file_test.go index 46b2ff0262..c80744f8c3 100644 --- a/discovery/file/file_test.go +++ b/discovery/file/file_test.go @@ -327,7 +327,6 @@ func TestInitialUpdate(t *testing.T) { "fixtures/valid.yml", "fixtures/valid.json", } { - tc := tc t.Run(tc, func(t *testing.T) { t.Parallel() @@ -348,7 +347,6 @@ func TestInvalidFile(t *testing.T) { "fixtures/invalid_nil.yml", "fixtures/invalid_nil.json", } { - tc := tc t.Run(tc, func(t *testing.T) { t.Parallel() diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index 32f1bb6722..f5d20fb740 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -98,7 +98,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/hetzner/hetzner.go b/discovery/hetzner/hetzner.go index 9245d933cc..5c5252d3d7 100644 --- a/discovery/hetzner/hetzner.go +++ b/discovery/hetzner/hetzner.go @@ -99,7 +99,7 @@ const ( ) // UnmarshalYAML implements the yaml.Unmarshaler interface. 
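Reviewer note: maps.Copy (Go 1.21, package maps) is what replaces the hand-written copy loops in file.go above and later in manager.go, moby/tasks.go and the Kubernetes tests. A self-contained sketch:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]int{"a": 1}
	src := map[string]int{"b": 2, "c": 3}
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }
	fmt.Println(dst)    // map[a:1 b:2 c:3]
}
```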
-func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Role) UnmarshalYAML(unmarshal func(any) error) error { if err := unmarshal((*string)(c)); err != nil { return err } @@ -112,7 +112,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/http/http.go b/discovery/http/http.go index ebc1c31f61..bbaf4038c8 100644 --- a/discovery/http/http.go +++ b/discovery/http/http.go @@ -78,7 +78,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/ionos/ionos.go b/discovery/ionos/ionos.go index 4ca5194e4b..021986395b 100644 --- a/discovery/ionos/ionos.go +++ b/discovery/ionos/ionos.go @@ -106,7 +106,7 @@ func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery. } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go index 1732b96d9c..2d25213a12 100644 --- a/discovery/kubernetes/endpoints.go +++ b/discovery/kubernetes/endpoints.go @@ -83,15 +83,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, } _, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { epAddCount.Inc() e.enqueue(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { epUpdateCount.Inc() e.enqueue(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { epDeleteCount.Inc() e.enqueue(o) }, @@ -100,7 +100,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, l.Error("Error adding endpoints event handler.", "err", err) } - serviceUpdate := func(o interface{}) { + serviceUpdate := func(o any) { svc, err := convertToService(o) if err != nil { e.logger.Error("converting to Service object failed", "err", err) @@ -119,15 +119,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ // TODO(fabxc): potentially remove add and delete event handlers. Those should // be triggered via the endpoint handlers already. 
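Reviewer note: client-go declares these handler fields with interface{} parameters; since any is the same type, the func(o any) literals satisfy them unchanged, and the handlers still type-assert to the concrete object. A sketch assuming a recent client-go:

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	h := cache.ResourceEventHandlerFuncs{
		AddFunc: func(o any) { // type-identical to func(o interface{})
			node, ok := o.(*apiv1.Node) // assert back to the concrete type
			if !ok {
				return // e.g. a cache.DeletedFinalStateUnknown tombstone
			}
			fmt.Println(node.Name)
		},
	}
	h.AddFunc(&apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}})
}
```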
- AddFunc: func(o interface{}) { + AddFunc: func(o any) { svcAddCount.Inc() serviceUpdate(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { svcUpdateCount.Inc() serviceUpdate(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { svcDeleteCount.Inc() serviceUpdate(o) }, @@ -136,7 +136,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, l.Error("Error adding services event handler.", "err", err) } _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(old, cur interface{}) { + UpdateFunc: func(old, cur any) { podUpdateCount.Inc() oldPod, ok := old.(*apiv1.Pod) if !ok { @@ -160,15 +160,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { nodeName, err := nodeName(o) if err != nil { l.Error("Error getting Node name", "err", err) @@ -183,7 +183,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node, if e.withNamespaceMetadata { _, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) e.enqueueNamespace(namespace.Name) }, @@ -234,7 +234,7 @@ func (e *Endpoints) enqueuePod(podNamespacedName string) { } } -func (e *Endpoints) enqueue(obj interface{}) { +func (e *Endpoints) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return @@ -303,7 +303,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) return true } -func convertToEndpoints(o interface{}) (*apiv1.Endpoints, error) { +func convertToEndpoints(o any) (*apiv1.Endpoints, error) { endpoints, ok := o.(*apiv1.Endpoints) if ok { return endpoints, nil diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go index de8e7e58f6..a4b1d942ed 100644 --- a/discovery/kubernetes/endpointslice.go +++ b/discovery/kubernetes/endpointslice.go @@ -83,15 +83,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n } _, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { epslAddCount.Inc() e.enqueue(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { epslUpdateCount.Inc() e.enqueue(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { epslDeleteCount.Inc() e.enqueue(o) }, @@ -100,7 +100,7 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n l.Error("Error adding endpoint slices event handler.", "err", err) } - serviceUpdate := func(o interface{}) { + serviceUpdate := func(o any) { svc, err := convertToService(o) if err != nil { e.logger.Error("converting to Service object failed", "err", err) @@ -118,15 +118,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { svcAddCount.Inc() serviceUpdate(o) }, - UpdateFunc: func(_, o interface{}) { + 
UpdateFunc: func(_, o any) { svcUpdateCount.Inc() serviceUpdate(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { svcDeleteCount.Inc() serviceUpdate(o) }, @@ -137,15 +137,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { node := o.(*apiv1.Node) e.enqueueNode(node.Name) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { nodeName, err := nodeName(o) if err != nil { l.Error("Error getting Node name", "err", err) @@ -160,7 +160,7 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n if e.withNamespaceMetadata { _, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) e.enqueueNamespace(namespace.Name) }, @@ -199,7 +199,7 @@ func (e *EndpointSlice) enqueueNamespace(namespace string) { } } -func (e *EndpointSlice) enqueue(obj interface{}) { +func (e *EndpointSlice) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go index 2e81379e07..7b74d8734d 100644 --- a/discovery/kubernetes/ingress.go +++ b/discovery/kubernetes/ingress.go @@ -56,15 +56,15 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S } _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { ingressAddCount.Inc() s.enqueue(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { ingressDeleteCount.Inc() s.enqueue(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { ingressUpdateCount.Inc() s.enqueue(o) }, @@ -75,7 +75,7 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S if s.withNamespaceMetadata { _, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) s.enqueueNamespace(namespace.Name) }, @@ -90,7 +90,7 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S return s } -func (i *Ingress) enqueue(obj interface{}) { +func (i *Ingress) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return diff --git a/discovery/kubernetes/ingress_test.go b/discovery/kubernetes/ingress_test.go index bf2d9c1769..76c9ff9036 100644 --- a/discovery/kubernetes/ingress_test.go +++ b/discovery/kubernetes/ingress_test.go @@ -16,6 +16,7 @@ package kubernetes import ( "context" "fmt" + "maps" "testing" "github.com/prometheus/common/model" @@ -193,9 +194,7 @@ func TestIngressDiscoveryNamespaces(t *testing.T) { n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := expectedTargetGroups("ns1", TLSNo) - for k, v := range expectedTargetGroups("ns2", TLSNo) { - expected[k] = v - } + maps.Copy(expected, expectedTargetGroups("ns2", TLSNo)) k8sDiscoveryTest{ discovery: n, afterStart: func() { diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go index 42b1843cd9..7966de52a8 100644 --- a/discovery/kubernetes/kubernetes.go +++ 
b/discovery/kubernetes/kubernetes.go @@ -80,7 +80,7 @@ const ( ) // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Role) UnmarshalYAML(unmarshal func(any) error) error { if err := unmarshal((*string)(c)); err != nil { return err } @@ -160,7 +160,7 @@ type AttachMetadataConfig struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) @@ -234,7 +234,7 @@ type NamespaceDiscovery struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(any) error) error { *c = NamespaceDiscovery{} type plain NamespaceDiscovery return unmarshal((*plain)(c)) @@ -698,7 +698,7 @@ func (d *Discovery) newNamespaceInformer(ctx context.Context) cache.SharedInform func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) if d.attachMetadata.Node { - indexers[nodeIndex] = func(obj interface{}) ([]string, error) { + indexers[nodeIndex] = func(obj any) ([]string, error) { pod, ok := obj.(*apiv1.Pod) if !ok { return nil, errors.New("object is not a pod") @@ -716,7 +716,7 @@ func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedInd func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) - indexers[podIndex] = func(obj interface{}) ([]string, error) { + indexers[podIndex] = func(obj any) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { return nil, errors.New("object is not endpoints") @@ -733,7 +733,7 @@ func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.Shar } if d.attachMetadata.Node { - indexers[nodeIndex] = func(obj interface{}) ([]string, error) { + indexers[nodeIndex] = func(obj any) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { return nil, errors.New("object is not endpoints") @@ -766,7 +766,7 @@ func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.Shar func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer { indexers := make(map[string]cache.IndexFunc) - indexers[serviceIndex] = func(obj interface{}) ([]string, error) { + indexers[serviceIndex] = func(obj any) ([]string, error) { e, ok := obj.(*disv1.EndpointSlice) if !ok { return nil, errors.New("object is not an endpointslice") @@ -781,7 +781,7 @@ func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, objec } if d.attachMetadata.Node { - indexers[nodeIndex] = func(obj interface{}) ([]string, error) { + indexers[nodeIndex] = func(obj any) ([]string, error) { e, ok := obj.(*disv1.EndpointSlice) if !ok { return nil, errors.New("object is not an endpointslice") @@ -886,7 +886,7 @@ func namespacedName(namespace, name string) string { // nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone. // It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key. 
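Reviewer note: the indexer literals above satisfy cache.IndexFunc, which is declared as func(obj interface{}) ([]string, error), without any change. A sketch of the pod-by-node indexer from newIndexedPodsInformer:

```go
package main

import (
	"errors"
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexers := map[string]cache.IndexFunc{
		"node": func(obj any) ([]string, error) {
			pod, ok := obj.(*apiv1.Pod)
			if !ok {
				return nil, errors.New("object is not a pod")
			}
			return []string{pod.Spec.NodeName}, nil
		},
	}
	keys, err := indexers["node"](&apiv1.Pod{Spec: apiv1.PodSpec{NodeName: "node-1"}})
	fmt.Println(keys, err) // [node-1] <nil>
}
```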
-func nodeName(o interface{}) (string, error) { +func nodeName(o any) (string, error) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o) if err != nil { return "", err diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index 8cb050a505..f8edec23cb 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -302,7 +302,6 @@ func TestFailuresCountMetric(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(string(tc.role), func(t *testing.T) { t.Parallel() diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go index 0e0c5745f2..8a67abb676 100644 --- a/discovery/kubernetes/node.go +++ b/discovery/kubernetes/node.go @@ -62,15 +62,15 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co } _, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { nodeAddCount.Inc() n.enqueue(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { nodeDeleteCount.Inc() n.enqueue(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { nodeUpdateCount.Inc() n.enqueue(o) }, @@ -81,7 +81,7 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co return n } -func (n *Node) enqueue(obj interface{}) { +func (n *Node) enqueue(obj any) { key, err := nodeName(obj) if err != nil { return @@ -140,7 +140,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool return true } -func convertToNode(o interface{}) (*apiv1.Node, error) { +func convertToNode(o any) (*apiv1.Node, error) { node, ok := o.(*apiv1.Node) if ok { return node, nil diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go index 9c9b24a1be..b58800412b 100644 --- a/discovery/kubernetes/pod.go +++ b/discovery/kubernetes/pod.go @@ -71,15 +71,15 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac queue: workqueue.NewNamed(RolePod.String()), } _, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { podAddCount.Inc() p.enqueue(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { podDeleteCount.Inc() p.enqueue(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { podUpdateCount.Inc() p.enqueue(o) }, @@ -90,15 +90,15 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac if p.withNodeMetadata { _, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { node := o.(*apiv1.Node) p.enqueuePodsForNode(node.Name) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { node := o.(*apiv1.Node) p.enqueuePodsForNode(node.Name) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { nodeName, err := nodeName(o) if err != nil { l.Error("Error getting Node name", "err", err) @@ -113,7 +113,7 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac if p.withNamespaceMetadata { _, err = p.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) p.enqueuePodsForNamespace(namespace.Name) }, @@ -128,7 +128,7 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac return p } -func (p *Pod) enqueue(obj interface{}) { +func (p *Pod) enqueue(obj any) 
{ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return @@ -195,7 +195,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool return true } -func convertToPod(o interface{}) (*apiv1.Pod, error) { +func convertToPod(o any) (*apiv1.Pod, error) { pod, ok := o.(*apiv1.Pod) if ok { return pod, nil diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go index 71f7f7e621..2cf336774a 100644 --- a/discovery/kubernetes/pod_test.go +++ b/discovery/kubernetes/pod_test.go @@ -16,6 +16,7 @@ package kubernetes import ( "context" "fmt" + "maps" "testing" "github.com/prometheus/common/model" @@ -437,9 +438,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) { n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}}) expected := expectedPodTargetGroups("ns1") - for k, v := range expectedPodTargetGroups("ns2") { - expected[k] = v - } + maps.Copy(expected, expectedPodTargetGroups("ns2")) k8sDiscoveryTest{ discovery: n, beforeRun: func() { diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index b4ba9ec1c3..a2e00b1032 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -61,15 +61,15 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S } _, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(o interface{}) { + AddFunc: func(o any) { svcAddCount.Inc() s.enqueue(o) }, - DeleteFunc: func(o interface{}) { + DeleteFunc: func(o any) { svcDeleteCount.Inc() s.enqueue(o) }, - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { svcUpdateCount.Inc() s.enqueue(o) }, @@ -80,7 +80,7 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S if s.withNamespaceMetadata { _, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(_, o interface{}) { + UpdateFunc: func(_, o any) { namespace := o.(*apiv1.Namespace) s.enqueueNamespace(namespace.Name) }, @@ -95,7 +95,7 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S return s } -func (s *Service) enqueue(obj interface{}) { +func (s *Service) enqueue(obj any) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return @@ -171,7 +171,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b return true } -func convertToService(o interface{}) (*apiv1.Service, error) { +func convertToService(o any) (*apiv1.Service, error) { service, ok := o.(*apiv1.Service) if ok { return service, nil diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go index 033025f840..fe61e122e4 100644 --- a/discovery/linode/linode.go +++ b/discovery/linode/linode.go @@ -112,7 +112,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/manager.go b/discovery/manager.go index 51a46ca231..6688152da9 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "log/slog" + "maps" "reflect" "sync" "time" @@ -37,7 +38,7 @@ type poolKey struct { type Provider struct { name string d Discoverer - config interface{} + config any cancel context.CancelFunc // done should be called after cleaning up resources associated with cancelled provider. @@ -62,7 +63,7 @@ func (p *Provider) IsStarted() bool { return p.cancel != nil } -func (p *Provider) Config() interface{} { +func (p *Provider) Config() any { return p.config } @@ -255,9 +256,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { } if l := len(refTargets); l > 0 { m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) - for k, v := range refTargets { - m.targets[poolKey{s, prov.name}][k] = v - } + maps.Copy(m.targets[poolKey{s, prov.name}], refTargets) } } m.targetsMtx.Unlock() diff --git a/discovery/manager_test.go b/discovery/manager_test.go index ab1cec88de..13b41066c3 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -668,7 +668,6 @@ func TestTargetUpdatesOrder(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(tc.title, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -1350,7 +1349,6 @@ func TestCoordinationWithReceiver(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.title, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -1471,7 +1469,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { wg.Add(2000) start := make(chan struct{}) - for i := 0; i < 1000; i++ { + for range 1000 { go func() { <-start td.update([]*targetgroup.Group{ @@ -1485,7 +1483,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) { }() } - for i := 0; i < 1000; i++ { + for i := range 1000 { go func(i int) { <-start c := map[string]Configs{ @@ -1545,7 +1543,7 @@ func (t *testDiscoverer) update(tgs []*targetgroup.Group) { func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() // Check that all metrics can be unregistered, allowing a second manager to be created. - for i := 0; i < 2; i++ { + for range 2 { ctx, cancel := context.WithCancel(context.Background()) refreshMetrics, sdMetrics := NewTestMetrics(t, reg) diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 0c2c2e9702..cae040ca98 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -101,7 +101,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
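Reviewer note: the manager_test.go fan-outs above, and the deleted tc := tc copies throughout this patch, both rest on Go 1.22 giving each loop iteration its own variable. A minimal sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 1000 { // write for range 1000 when the counter is unused
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = i // safe without an i := i copy: i is scoped per iteration
		}()
	}
	wg.Wait()
	fmt.Println("done")
}
```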
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) @@ -513,7 +513,7 @@ func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32 ports := make([]uint32, len(portMappings)) labels := make([]map[string]string, len(portMappings)) - for i := 0; i < len(portMappings); i++ { + for i := range portMappings { labels[i] = portMappings[i].Labels if containerNet { diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 2b640dea82..cb5577a131 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -103,7 +103,7 @@ func (c *DockerSDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultDockerSDConfig type plain DockerSDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/moby/dockerswarm.go b/discovery/moby/dockerswarm.go index 57c0af7171..44abb0ab25 100644 --- a/discovery/moby/dockerswarm.go +++ b/discovery/moby/dockerswarm.go @@ -90,7 +90,7 @@ func (c *DockerSwarmSDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultDockerSwarmSDConfig type plain DockerSwarmSDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/moby/tasks.go b/discovery/moby/tasks.go index 46e8a06d01..8a3dbe8101 100644 --- a/discovery/moby/tasks.go +++ b/discovery/moby/tasks.go @@ -16,6 +16,7 @@ package moby import ( "context" "fmt" + "maps" "net" "strconv" @@ -81,13 +82,9 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err } } - for k, v := range serviceLabels[s.ServiceID] { - commonLabels[k] = v - } + maps.Copy(commonLabels, serviceLabels[s.ServiceID]) - for k, v := range nodeLabels[s.NodeID] { - commonLabels[k] = v - } + maps.Copy(commonLabels, nodeLabels[s.NodeID]) for _, p := range s.Status.PortStatus.Ports { if p.Protocol != swarm.PortConfigProtocolTCP { diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go index 7516308026..e204b740f7 100644 --- a/discovery/nomad/nomad.go +++ b/discovery/nomad/nomad.go @@ -93,7 +93,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 6c2f79b3a4..58bf154555 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "log/slog" + "maps" "net" "strconv" @@ -206,7 +207,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v) } for pool, address := range s.Addresses { - md, ok := address.([]interface{}) + md, ok := address.([]any) if !ok { i.logger.Warn("Invalid type for address, expected array") continue @@ -216,7 +217,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, continue } for _, address := range md { - md1, ok := address.(map[string]interface{}) + md1, ok := address.(map[string]any) if !ok { i.logger.Warn("Invalid type for address, expected dict") continue @@ -230,9 +231,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, continue } lbls := make(model.LabelSet, len(labels)) - for k, v := range labels { - lbls[k] = v - } + maps.Copy(lbls, labels) lbls[openstackLabelAddressPool] = model.LabelValue(pool) lbls[openstackLabelPrivateIP] = model.LabelValue(addr) if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok { diff --git a/discovery/openstack/openstack.go b/discovery/openstack/openstack.go index d7b58787a1..7f23757297 100644 --- a/discovery/openstack/openstack.go +++ b/discovery/openstack/openstack.go @@ -103,7 +103,7 @@ const ( ) // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Role) UnmarshalYAML(unmarshal func(any) error) error { if err := unmarshal((*string)(c)); err != nil { return err } @@ -116,7 +116,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/ovhcloud/ovhcloud.go b/discovery/ovhcloud/ovhcloud.go index 972d49f25f..69c7cd6004 100644 --- a/discovery/ovhcloud/ovhcloud.go +++ b/discovery/ovhcloud/ovhcloud.go @@ -66,7 +66,7 @@ func (SDConfig) Name() string { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index e249bc4afa..a5163addb0 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -102,7 +102,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/puppetdb/resources.go b/discovery/puppetdb/resources.go index d6387fe1b8..487c471c1b 100644 --- a/discovery/puppetdb/resources.go +++ b/discovery/puppetdb/resources.go @@ -34,7 +34,7 @@ type Resource struct { Parameters Parameters `json:"parameters"` } -type Parameters map[string]interface{} +type Parameters map[string]any func (p *Parameters) toLabels() model.LabelSet { labels := model.LabelSet{} @@ -52,7 +52,7 @@ func (p *Parameters) toLabels() model.LabelSet { labelValue = strconv.FormatFloat(value, 'g', -1, 64) case []string: labelValue = separator + strings.Join(value, separator) + separator - case []interface{}: + case []any: if len(value) == 0 { continue } @@ -72,7 +72,7 @@ func (p *Parameters) toLabels() model.LabelSet { } } labelValue = strings.Join(values, separator) - case map[string]interface{}: + case map[string]any: subParameter := Parameters(value) prefix := strutil.SanitizeLabelName(k + "_") for subk, subv := range subParameter.toLabels() { diff --git a/discovery/registry.go b/discovery/registry.go index 92fa3d3d16..98c956704e 100644 --- a/discovery/registry.go +++ b/discovery/registry.go @@ -110,7 +110,7 @@ func getConfigType(out reflect.Type) reflect.Type { // UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs // that have a Configs field that should be inlined. -func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { +func UnmarshalYAMLWithInlineConfigs(out any, unmarshal func(any) error) error { outVal := reflect.ValueOf(out) if outVal.Kind() != reflect.Ptr { return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) @@ -198,7 +198,7 @@ func readConfigs(structVal reflect.Value, startField int) (Configs, error) { // MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs // that have a Configs field that should be inlined. -func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) { +func MarshalYAMLWithInlineConfigs(in any) (any, error) { inVal := reflect.ValueOf(in) for inVal.Kind() == reflect.Ptr { inVal = inVal.Elem() diff --git a/discovery/scaleway/scaleway.go b/discovery/scaleway/scaleway.go index 5d5284dab0..d617e01905 100644 --- a/discovery/scaleway/scaleway.go +++ b/discovery/scaleway/scaleway.go @@ -55,7 +55,7 @@ const ( ) // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *role) UnmarshalYAML(unmarshal func(any) error) error { if err := unmarshal((*string)(c)); err != nil { return err } @@ -125,7 +125,7 @@ func (c SDConfig) secretKeyForConfig() string { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/stackit/stackit.go b/discovery/stackit/stackit.go index 030f2bdb55..351526e016 100644 --- a/discovery/stackit/stackit.go +++ b/discovery/stackit/stackit.go @@ -95,7 +95,7 @@ type refresher interface { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
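Reviewer note: the puppetdb Parameters type switch works because decoding JSON into map[string]any produces float64, []any and map[string]any for numbers, arrays and objects respectively. A self-contained sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var params map[string]any
	_ = json.Unmarshal([]byte(`{"port": 80, "tags": ["a", "b"]}`), &params)
	for k, v := range params {
		switch value := v.(type) {
		case float64: // JSON numbers decode as float64
			fmt.Println(k, value)
		case []any: // JSON arrays decode as []any
			fmt.Println(k, len(value))
		}
	}
}
```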
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/stackit/types.go b/discovery/stackit/types.go index 66681c3455..84b7d0266c 100644 --- a/discovery/stackit/types.go +++ b/discovery/stackit/types.go @@ -20,14 +20,14 @@ type ServerListResponse struct { } type Server struct { - AvailabilityZone string `json:"availabilityZone"` - ID string `json:"id"` - Labels map[string]interface{} `json:"labels"` - MachineType string `json:"machineType"` - Name string `json:"name"` - Nics []ServerNetwork `json:"nics"` - PowerStatus string `json:"powerStatus"` - Status string `json:"status"` + AvailabilityZone string `json:"availabilityZone"` + ID string `json:"id"` + Labels map[string]any `json:"labels"` + MachineType string `json:"machineType"` + Name string `json:"name"` + Nics []ServerNetwork `json:"nics"` + PowerStatus string `json:"powerStatus"` + Status string `json:"status"` } // ServerNetwork Describes the object that matches servers to its networks. diff --git a/discovery/targetgroup/targetgroup.go b/discovery/targetgroup/targetgroup.go index e74870f046..5c3b67d6e8 100644 --- a/discovery/targetgroup/targetgroup.go +++ b/discovery/targetgroup/targetgroup.go @@ -37,7 +37,7 @@ func (tg Group) String() string { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (tg *Group) UnmarshalYAML(unmarshal func(any) error) error { g := struct { Targets []string `yaml:"targets"` Labels model.LabelSet `yaml:"labels"` @@ -56,7 +56,7 @@ func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements the yaml.Marshaler interface. -func (tg Group) MarshalYAML() (interface{}, error) { +func (tg Group) MarshalYAML() (any, error) { g := &struct { Targets []string `yaml:"targets"` Labels model.LabelSet `yaml:"labels,omitempty"` diff --git a/discovery/targetgroup/targetgroup_test.go b/discovery/targetgroup/targetgroup_test.go index 1e79859c6e..e0df05ab9a 100644 --- a/discovery/targetgroup/targetgroup_test.go +++ b/discovery/targetgroup/targetgroup_test.go @@ -93,7 +93,7 @@ func TestTargetGroupJSONMarshal(t *testing.T) { } func TestTargetGroupYamlMarshal(t *testing.T) { - marshal := func(g interface{}) []byte { + marshal := func(g any) []byte { d, err := yaml.Marshal(g) if err != nil { panic(err) @@ -134,8 +134,8 @@ func TestTargetGroupYamlMarshal(t *testing.T) { } func TestTargetGroupYamlUnmarshal(t *testing.T) { - unmarshal := func(d []byte) func(interface{}) error { - return func(o interface{}) error { + unmarshal := func(d []byte) func(any) error { + return func(o any) error { return yaml.Unmarshal(d, o) } } diff --git a/discovery/triton/triton.go b/discovery/triton/triton.go index 5efe49e23d..9300753015 100644 --- a/discovery/triton/triton.go +++ b/discovery/triton/triton.go @@ -91,7 +91,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index 01cbbca361..6419d8d365 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -133,7 +133,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig err := unmarshal((*plain)(c)) @@ -164,7 +164,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) { var result string - err := rpcclient.Call("auth.login", []interface{}{user, pass, duration}, &result) + err := rpcclient.Call("auth.login", []any{user, pass, duration}, &result) return result, err } @@ -174,7 +174,7 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, enti SystemGroups []systemGroupID `xmlrpc:"system_groups"` } - err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []interface{}{token, entitlement}, &systemGroupsInfos) + err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []any{token, entitlement}, &systemGroupsInfos) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, enti func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) { var networkInfos []networkInfo - err := rpcclient.Call("system.getNetworkForSystems", []interface{}{token, systemIDs}, &networkInfos) + err := rpcclient.Call("system.getNetworkForSystems", []any{token, systemIDs}, &networkInfos) if err != nil { return nil, err } @@ -208,7 +208,7 @@ func getEndpointInfoForSystems( var endpointInfos []endpointInfo err := rpcclient.Call( "system.monitoring.listEndpoints", - []interface{}{token, systemIDs}, &endpointInfos) + []any{token, systemIDs}, &endpointInfos) return endpointInfos, err } diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go index 0ab477438b..79a7a0179f 100644 --- a/discovery/vultr/vultr.go +++ b/discovery/vultr/vultr.go @@ -95,7 +95,7 @@ func (c *SDConfig) SetDirectory(dir string) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultSDConfig type plain SDConfig if err := unmarshal((*plain)(c)); err != nil { diff --git a/discovery/xds/kuma.go b/discovery/xds/kuma.go index 61f348b87d..82ca8f2c9a 100644 --- a/discovery/xds/kuma.go +++ b/discovery/xds/kuma.go @@ -65,7 +65,7 @@ func (*KumaSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discove } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
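Reviewer note: the uyuni changes only respell the argument slice's element type; []interface{} and []any are identical, so the XML-RPC wire format is untouched. A fragment, assuming the rpcclient and credentials already in scope in uyuni.go:

```go
// Positional XML-RPC arguments travel as one []any slice.
var result string
err := rpcclient.Call("auth.login", []any{user, pass, duration}, &result)
```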
-func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultKumaSDConfig type plainKumaConf KumaSDConfig err := unmarshal((*plainKumaConf)(c)) diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index e0b1d580db..d5239324cb 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -72,7 +72,7 @@ func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (dis } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultServersetSDConfig type plain ServersetSDConfig err := unmarshal((*plain)(c)) @@ -114,7 +114,7 @@ func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discove } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultNerveSDConfig type plain NerveSDConfig err := unmarshal((*plain)(c)) diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index d7720ccc8c..d20a049e77 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -3270,7 +3270,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) { it = c.h.AllReverseBucketIterator() length := len(c.expPositiveBuckets) - for j := 0; j < length; j++ { + for j := range length { i := length - j - 1 b := c.expPositiveBuckets[i] require.True(t, it.Next(), "all reverse bucket iterator exhausted too early") @@ -3286,7 +3286,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) { require.False(t, it.Next(), "positive bucket iterator not exhausted") it = c.h.PositiveReverseBucketIterator() - for j := 0; j < length; j++ { + for j := range length { i := length - j - 1 b := c.expPositiveBuckets[i] require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early") diff --git a/model/histogram/generic.go b/model/histogram/generic.go index a36b58d069..90a94a5600 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -402,7 +402,7 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB } var last IBC - for i := 0; i < len(buckets); i++ { + for i := range buckets { var c IBC if deltas { c = last + buckets[i] diff --git a/model/histogram/test_utils.go b/model/histogram/test_utils.go index e6b33863bd..a4871ada31 100644 --- a/model/histogram/test_utils.go +++ b/model/histogram/test_utils.go @@ -22,7 +22,7 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide)) var histograms []*Histogram - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { h := &Histogram{ Count: uint64(i) + observationCount, ZeroCount: uint64(i), @@ -35,13 +35,13 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { PositiveBuckets: make([]int64, bucketsPerSide), } - for j := 0; j < numSpans; j++ { + for j := range numSpans { s := Span{Offset: 1, Length: spanLength} h.NegativeSpans[j] = s h.PositiveSpans[j] = s } - for j := 0; j < bucketsPerSide; j++ { + for j := range bucketsPerSide { h.NegativeBuckets[j] = 1 h.PositiveBuckets[j] = 1 } diff --git 
a/model/labels/labels_common.go b/model/labels/labels_common.go index 8169d038f5..8345c12d16 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -84,12 +84,12 @@ func (ls *Labels) UnmarshalJSON(b []byte) error { } // MarshalYAML implements yaml.Marshaler. -func (ls Labels) MarshalYAML() (interface{}, error) { +func (ls Labels) MarshalYAML() (any, error) { return ls.Map(), nil } // UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (ls *Labels) UnmarshalYAML(unmarshal func(any) error) error { var m map[string]string if err := unmarshal(&m); err != nil { diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 4b23748a91..8850278a21 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -579,7 +579,7 @@ func TestLabels_DropReserved(t *testing.T) { func ScratchBuilderForBenchmark() ScratchBuilder { // (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.) b := NewScratchBuilder(256) - for i := 0; i < 256; i++ { + for i := range 256 { b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i)) } b.Labels() @@ -625,7 +625,7 @@ func FromStringsForBenchmark(ss ...string) Labels { func BenchmarkLabels_Get(b *testing.B) { maxLabels := 30 allLabels := make([]Label, maxLabels) - for i := 0; i < maxLabels; i++ { + for i := range maxLabels { allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))} } for _, size := range []int{5, 10, maxLabels} { @@ -906,7 +906,7 @@ func BenchmarkLabels_Hash(b *testing.B) { name: "typical labels under 1KB", lbls: func() Labels { b := NewBuilder(EmptyLabels()) - for i := 0; i < 10; i++ { + for i := range 10 { // Label ~20B name, 50B value. b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) } @@ -917,7 +917,7 @@ func BenchmarkLabels_Hash(b *testing.B) { name: "bigger labels over 1KB", lbls: func() Labels { b := NewBuilder(EmptyLabels()) - for i := 0; i < 10; i++ { + for i := range 10 { // Label ~50B name, 50B value. b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)) } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index bb6fce3104..353d283329 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -114,9 +114,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) { testValues = append(testValues, generateRandomValues()...) 
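The test hunks here adopt Go 1.22's range-over-integer loops. A small standalone sketch of both forms used in this diff, including the reverse-index walk from the iterator tests and ranging over a non-`int` integer:

package main

import "fmt"

func main() {
	xs := []string{"a", "b", "c"}
	length := len(xs)
	for j := range length {
		fmt.Println(xs[length-j-1]) // reverse walk, as in the iterator tests.
	}
	var gap int32 = 2
	for range gap { // any integer type works, and the index can be dropped.
		fmt.Println("pad")
	}
}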
for _, r := range regexes { - r := r for _, v := range testValues { - v := v t.Run(readable(r)+` on "`+readable(v)+`"`, func(t *testing.T) { t.Parallel() m, err := NewFastRegexMatcher(r) @@ -245,7 +243,6 @@ func TestFindSetMatches(t *testing.T) { // too many combinations {"[a-z][a-z]", nil, false}, } { - c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) @@ -416,7 +413,6 @@ func TestStringMatcherFromRegexp(t *testing.T) { {"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}}, {"f.?o", nil}, } { - c := c t.Run(c.pattern, func(t *testing.T) { t.Parallel() parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL) @@ -683,7 +679,7 @@ func randString(randGenerator *rand.Rand, length int) string { func randStrings(randGenerator *rand.Rand, many, length int) []string { out := make([]string, 0, many) - for i := 0; i < many; i++ { + for range many { out = append(out, randString(randGenerator, length)) } return out diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index d6e809bc6f..c7c5439d54 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -69,7 +69,7 @@ const ( ) // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (a *Action) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -105,7 +105,7 @@ type Config struct { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *Config) UnmarshalYAML(unmarshal func(any) error) error { *c = DefaultRelabelConfig type plain Config if err := unmarshal((*plain)(c)); err != nil { @@ -207,7 +207,7 @@ func MustNewRegexp(s string) Regexp { } // UnmarshalYAML implements the yaml.Unmarshaler interface. -func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (re *Regexp) UnmarshalYAML(unmarshal func(any) error) error { var s string if err := unmarshal(&s); err != nil { return err @@ -221,7 +221,7 @@ func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { } // MarshalYAML implements the yaml.Marshaler interface. 
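The dropped `r := r`, `v := v`, and `c := c` lines rely on Go 1.22 giving each `for` iteration its own loop variables, so closures started inside the loop (parallel subtests, goroutines) capture distinct values. A sketch of the behavior the tests now depend on:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, v := range []string{"x", "y", "z"} {
		wg.Add(1)
		go func() { // before Go 1.22 this needed `v := v` above it.
			defer wg.Done()
			fmt.Println(v) // prints x, y, z in some order, not z three times.
		}()
	}
	wg.Wait()
}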
-func (re Regexp) MarshalYAML() (interface{}, error) { +func (re Regexp) MarshalYAML() (any, error) { if re.String() != "" { return re.String(), nil } diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go index abbc8c5a66..4e592167f3 100644 --- a/model/textparse/openmetricsparse.go +++ b/model/textparse/openmetricsparse.go @@ -436,10 +436,7 @@ func (p *OpenMetricsParser) nextToken() token { } func (p *OpenMetricsParser) parseError(exp string, got token) error { - e := p.l.i + 1 - if len(p.l.b) < e { - e = len(p.l.b) - } + e := min(len(p.l.b), p.l.i+1) return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } diff --git a/model/textparse/promparse.go b/model/textparse/promparse.go index 6c782464a2..2b4b750b4d 100644 --- a/model/textparse/promparse.go +++ b/model/textparse/promparse.go @@ -291,10 +291,7 @@ func (p *PromParser) nextToken() token { } func (p *PromParser) parseError(exp string, got token) error { - e := p.l.i + 1 - if len(p.l.b) < e { - e = len(p.l.b) - } + e := min(len(p.l.b), p.l.i+1) return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go index ff4975013d..e7ce710491 100644 --- a/model/textparse/protobufparse.go +++ b/model/textparse/protobufparse.go @@ -36,7 +36,7 @@ import ( // floatFormatBufPool is exclusively used in formatOpenMetricsFloat. var floatFormatBufPool = sync.Pool{ - New: func() interface{} { + New: func() any { // To contain at most 17 digits and additional syntax for a float64. b := make([]byte, 0, 24) return &b diff --git a/prompb/codec.go b/prompb/codec.go index b2574fd9e1..6cc0cdc861 100644 --- a/prompb/codec.go +++ b/prompb/codec.go @@ -130,7 +130,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { func spansProtoToSpans(s []BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} } @@ -183,7 +183,7 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram func spansToSpansProto(s []histogram.Span) []BucketSpan { spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } diff --git a/prompb/io/prometheus/client/decoder_test.go b/prompb/io/prometheus/client/decoder_test.go index 18cf186127..8478fe3ef5 100644 --- a/prompb/io/prometheus/client/decoder_test.go +++ b/prompb/io/prometheus/client/decoder_test.go @@ -177,7 +177,7 @@ func TestMetricStreamingDecoder(t *testing.T) { func TestMetricStreamingDecoder_LabelsCorruption(t *testing.T) { lastScrapeSize := 0 var allPreviousLabels []labels.Labels - buffers := pool.New(128, 1024, 2, func(sz int) interface{} { return make([]byte, 0, sz) }) + buffers := pool.New(128, 1024, 2, func(sz int) any { return make([]byte, 0, sz) }) builder := labels.NewScratchBuilder(0) for _, labelsCount := range []int{1, 2, 3, 5, 8, 5, 3, 2, 1} { // Get buffer from pool like in scrape.go @@ -230,7 +230,7 @@ func generateMetricFamilyText(labelsCount int) string { randomName := fmt.Sprintf("metric_%d", rand.Intn(1000)) randomHelp := fmt.Sprintf("Test metric to demonstrate forced corruption %d.", rand.Intn(1000)) labels10 := "" - for i := 0; i < labelsCount; i++ { + for range labelsCount { labels10 += generateLabels() } return fmt.Sprintf(`name: "%s" diff 
--git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go index 4434c525fc..8f119d6d01 100644 --- a/prompb/io/prometheus/write/v2/codec.go +++ b/prompb/io/prometheus/write/v2/codec.go @@ -142,7 +142,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { func spansProtoToSpans(s []BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} } @@ -200,7 +200,7 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan { return nil } spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } diff --git a/promql/bench_test.go b/promql/bench_test.go index d55137071b..d425565788 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -43,24 +43,24 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in // These metrics will have data for all test time range metrics = append(metrics, labels.FromStrings("__name__", "a_one")) metrics = append(metrics, labels.FromStrings("__name__", "b_one")) - for j := 0; j < 10; j++ { + for j := range 10 { metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", strconv.Itoa(j))) } metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", "+Inf")) - for i := 0; i < 10; i++ { + for i := range 10 { metrics = append(metrics, labels.FromStrings("__name__", "a_ten", "l", strconv.Itoa(i))) metrics = append(metrics, labels.FromStrings("__name__", "b_ten", "l", strconv.Itoa(i))) - for j := 0; j < 10; j++ { + for j := range 10 { metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", strconv.Itoa(j))) } metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", "+Inf")) } - for i := 0; i < 100; i++ { + for i := range 100 { metrics = append(metrics, labels.FromStrings("__name__", "a_hundred", "l", strconv.Itoa(i))) metrics = append(metrics, labels.FromStrings("__name__", "b_hundred", "l", strconv.Itoa(i))) - for j := 0; j < 10; j++ { + for j := range 10 { metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", strconv.Itoa(j))) } metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", "+Inf")) @@ -70,7 +70,7 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in // Number points for each different label value of "l" for the sparse series pointsPerSparseSeries := numIntervals / 50 - for s := 0; s < numIntervals; s++ { + for s := range numIntervals { a := stor.Appender(context.Background()) ts := int64(s * interval) for i, metric := range metrics { @@ -525,7 +525,7 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage // Generate http_server_request_duration_seconds_count metrics with instance and job labels, and http_status_code label. // the classic target_info metrics is gauge type. metrics := make([]labels.Labels, 0, infoSeriesNum+len(statusCodes)) - for i := 0; i < infoSeriesNum; i++ { + for i := range infoSeriesNum { clusterName := "us-east" if i >= infoSeriesNum/2 { clusterName = "eu-south" @@ -550,7 +550,7 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage // Append the generated metrics and samples to the storage. 
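The two `parseError` hunks above collapse an explicit clamp into the `min` builtin (Go 1.21). A tiny sketch with invented values showing the two forms agree:

package main

import "fmt"

func main() {
	bufLen, pos := 10, 14

	e := pos + 1 // old form: clamp by hand.
	if bufLen < e {
		e = bufLen
	}

	fmt.Println(e == min(bufLen, pos+1)) // true
}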
refs := make([]storage.SeriesRef, len(metrics)) - for i := 0; i < numIntervals; i++ { + for i := range numIntervals { a := stor.Appender(context.Background()) ts := int64(i * interval) for j, metric := range metrics[:infoSeriesNum] { diff --git a/promql/engine.go b/promql/engine.go index d476e28cf4..92bedc9ac3 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -633,7 +633,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota logger := slog.New(l) f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append. - params := make(map[string]interface{}, 4) + params := make(map[string]any, 4) params["query"] = q.q if eq, ok := q.Statement().(*parser.EvalStmt); ok { params["start"] = formatDate(eq.Start) @@ -650,7 +650,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota f = append(f, slog.Any("spanID", span.SpanContext().SpanID())) } if origin := ctx.Value(QueryOrigin{}); origin != nil { - for k, v := range origin.(map[string]interface{}) { + for k, v := range origin.(map[string]any) { f = append(f, slog.Any(k, v)) } } @@ -1082,7 +1082,7 @@ type evaluator struct { } // errorf causes a panic with the input formatted into an error. -func (ev *evaluator) errorf(format string, args ...interface{}) { +func (ev *evaluator) errorf(format string, args ...any) { ev.error(fmt.Errorf(format, args...)) } @@ -1792,10 +1792,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, mat := make(Matrix, 0, len(selVS.Series)) // Output matrix. offset := durationMilliseconds(selVS.Offset) selRange := durationMilliseconds(sel.Range) - stepRange := selRange - if stepRange > ev.interval { - stepRange = ev.interval - } + stepRange := min(selRange, ev.interval) // Reuse objects across steps to save memory allocations. var floats []FPoint var histograms []HPoint @@ -3327,10 +3324,7 @@ seriesLoop: var r float64 switch op { case parser.TOPK, parser.BOTTOMK, parser.LIMITK: - k = int64(fParam) - if k > int64(len(inputMatrix)) { - k = int64(len(inputMatrix)) - } + k = min(int64(fParam), int64(len(inputMatrix))) if k < 1 { if enh.Ts != ev.endTimestamp { advanceRemainingSeries(enh.Ts, si+1) @@ -3697,7 +3691,7 @@ func changesMetricSchema(op parser.ItemType) bool { } // NewOriginContext returns a new context with data about the origin attached. -func NewOriginContext(ctx context.Context, data map[string]interface{}) context.Context { +func NewOriginContext(ctx context.Context, data map[string]any) context.Context { return context.WithValue(ctx, QueryOrigin{}, data) } diff --git a/promql/engine_test.go b/promql/engine_test.go index 536f4cac62..5cd7fcb83f 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -94,7 +94,7 @@ func TestQueryConcurrency(t *testing.T) { } var wg sync.WaitGroup - for i := 0; i < maxConcurrency; i++ { + for range maxConcurrency { q := engine.NewTestQuery(f) wg.Add(1) go func() { @@ -134,7 +134,7 @@ func TestQueryConcurrency(t *testing.T) { } // Terminate remaining queries. 
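The engine hunks retype the query-origin map as `map[string]any`. The shape of that pattern, sketched with invented (lowercase) names rather than the real exported `QueryOrigin`: a key type carries the map through a `context.Context`, and the reader type-asserts it back.

package main

import (
	"context"
	"fmt"
)

type queryOrigin struct{} // comparable zero-size key type.

func newOriginContext(ctx context.Context, data map[string]any) context.Context {
	return context.WithValue(ctx, queryOrigin{}, data)
}

func main() {
	ctx := newOriginContext(context.Background(), map[string]any{"ruleGroup": "example"})
	if origin := ctx.Value(queryOrigin{}); origin != nil {
		for k, v := range origin.(map[string]any) {
			fmt.Println(k, v)
		}
	}
}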
- for i := 0; i < maxConcurrency; i++ { + for range maxConcurrency { block <- struct{}{} } @@ -2193,7 +2193,7 @@ func TestQueryLogger_basic(t *testing.T) { engine.SetQueryLogger(f1) queryExec() logLines := getLogLines(t, ql1File) - require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"}) + require.Contains(t, logLines[0], "params", map[string]any{"query": "test statement"}) require.Len(t, logLines, 1) l := len(logLines) @@ -2246,7 +2246,7 @@ func TestQueryLogger_fields(t *testing.T) { engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]any{"foo": "bar"}) defer cancelCtx() query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") @@ -2279,7 +2279,7 @@ func TestQueryLogger_error(t *testing.T) { engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]any{"foo": "bar"}) defer cancelCtx() testErr := errors.New("failure") query := engine.NewTestQuery(func(context.Context) error { @@ -2291,7 +2291,7 @@ func TestQueryLogger_error(t *testing.T) { logLines := getLogLines(t, ql1File) require.Contains(t, logLines[0], "error", testErr) - require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"}) + require.Contains(t, logLines[0], "params", map[string]any{"query": "test statement"}) } func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { @@ -3339,7 +3339,6 @@ metric 0 1 2 } for _, c := range cases { - c := c t.Run(c.name, func(t *testing.T) { engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery) storage := promqltest.LoadedStorage(t, load) @@ -3987,7 +3986,7 @@ func TestSubQueryHistogramsCopy(t *testing.T) { testQuery := `rate({__name__="http_request_duration_seconds"}[3m])` ctx := context.Background() - for i := 0; i < 100; i++ { + for range 100 { queryable := promqltest.LoadedStorage(t, load) engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) @@ -3998,7 +3997,7 @@ func TestSubQueryHistogramsCopy(t *testing.T) { queryable.Close() } - for i := 0; i < 100; i++ { + for range 100 { queryable := promqltest.LoadedStorage(t, load) engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) diff --git a/promql/functions.go b/promql/functions.go index aecd647c9b..01b37603e4 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -230,10 +230,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? 
- minSchema := prev.Schema - if last.Schema < minSchema { - minSchema = last.Schema - } + minSchema := min(last.Schema, prev.Schema) for _, currPoint := range points[1 : len(points)-1] { curr := currPoint.H if curr == nil { @@ -1893,11 +1890,11 @@ func (s vectorByValueHeap) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s *vectorByValueHeap) Push(x interface{}) { +func (s *vectorByValueHeap) Push(x any) { *s = append(*s, *(x.(*Sample))) } -func (s *vectorByValueHeap) Pop() interface{} { +func (s *vectorByValueHeap) Pop() any { old := *s n := len(old) el := old[n-1] @@ -1923,11 +1920,11 @@ func (s vectorByReverseValueHeap) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s *vectorByReverseValueHeap) Push(x interface{}) { +func (s *vectorByReverseValueHeap) Push(x any) { *s = append(*s, *(x.(*Sample))) } -func (s *vectorByReverseValueHeap) Pop() interface{} { +func (s *vectorByReverseValueHeap) Pop() any { old := *s n := len(old) el := old[n-1] @@ -1975,7 +1972,7 @@ func stringFromArg(e parser.Expr) string { func stringSliceFromArgs(args parser.Expressions) []string { tmp := make([]string, len(args)) - for i := 0; i < len(args); i++ { + for i := range args { tmp[i] = stringFromArg(args[i]) } return tmp diff --git a/promql/functions_test.go b/promql/functions_test.go index 9ee0ba51dc..8dd91e7537 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -50,7 +50,7 @@ func TestDeriv(t *testing.T) { interval = 30 * 1000 // Introduce some timestamp jitter to test 0 slope case. // https://github.com/prometheus/prometheus/issues/7180 - for i = 0; i < 15; i++ { + for i = range int64(15) { jitter := 12 * i % 2 a.Append(0, metric, start+interval*i+jitter, 1) } diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 2a48942956..72bb77764a 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -347,7 +347,7 @@ func (l *Lexer) acceptRun(valid string) { // errorf returns an error token and terminates the scan by passing // back a nil pointer that will be the next state, terminating l.NextItem. -func (l *Lexer) errorf(format string, args ...interface{}) stateFn { +func (l *Lexer) errorf(format string, args ...any) stateFn { *l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)} l.scannedItem = true diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 138054c537..85dc3c899b 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -34,7 +34,7 @@ import ( ) var parserPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &parser{} }, } @@ -62,7 +62,7 @@ type parser struct { yyParser yyParserImpl - generatedParserResult interface{} + generatedParserResult any parseErrors ParseErrors } @@ -273,7 +273,7 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue } // addParseErrf formats the error and appends it to the list of parsing errors. -func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) { +func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...any) { p.addParseErr(positionRange, fmt.Errorf(format, args...)) } @@ -475,13 +475,13 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node, overread bool) ( } // newMap is used when building the FloatHistogram from a map. 
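The `vectorByValueHeap` changes above only respell `container/heap`'s `Push`/`Pop` parameter and result types; `heap.Interface` is still satisfied because the types are identical. A minimal heap with the same method shapes:

package main

import (
	"container/heap"
	"fmt"
)

type floatHeap []float64

func (h floatHeap) Len() int           { return len(h) }
func (h floatHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h floatHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *floatHeap) Push(x any)        { *h = append(*h, x.(float64)) }
func (h *floatHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &floatHeap{3, 1, 2}
	heap.Init(h)
	fmt.Println(heap.Pop(h)) // 1
}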
-func (*parser) newMap() (ret map[string]interface{}) { - return map[string]interface{}{} +func (*parser) newMap() (ret map[string]any) { + return map[string]any{} } // mergeMaps is used to combine maps as they're used to later build the Float histogram. // This will merge the right map into the left map. -func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) { +func (p *parser) mergeMaps(left, right *map[string]any) (ret *map[string]any) { for key, value := range *right { if _, ok := (*left)[key]; ok { p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key) @@ -530,7 +530,7 @@ func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint6 } // buildHistogramFromMap is used in the grammar to take then individual parts of the histogram and complete it. -func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram { +func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHistogram { output := &histogram.FloatHistogram{} val, ok := (*desc)["schema"] @@ -623,7 +623,7 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram. return output } -func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string, +func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]any, bucketsKey, offsetKey string, ) (buckets []float64, spans []histogram.Span) { bucketCount := 0 val, ok := (*desc)[bucketsKey] @@ -896,7 +896,7 @@ func parseDuration(ds string) (time.Duration, error) { // parseGenerated invokes the yacc generated parser. // The generated parser gets the provided startSymbol injected into // the lexer stream, based on which grammar will be used. -func (p *parser) parseGenerated(startSymbol ItemType) interface{} { +func (p *parser) parseGenerated(startSymbol ItemType) any { p.InjectItem(startSymbol) p.yyParser.Parse(p) diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index bf9a467076..d310984fa8 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -32,7 +32,7 @@ import ( ) func repeatError(query string, err error, start, startStep, end, endStep, count int) (errs ParseErrors) { - for i := 0; i < count; i++ { + for i := range count { errs = append(errs, ParseErr{ PositionRange: posrange.PositionRange{ Start: posrange.Pos(start + (i * startStep)), diff --git a/promql/promql_test.go b/promql/promql_test.go index 345ecab5ed..175b0a0d68 100644 --- a/promql/promql_test.go +++ b/promql/promql_test.go @@ -61,12 +61,11 @@ func TestConcurrentRangeQueries(t *testing.T) { // Limit the number of queries running at the same time. const numConcurrent = 4 sem := make(chan struct{}, numConcurrent) - for i := 0; i < numConcurrent; i++ { + for range numConcurrent { sem <- struct{}{} } var g errgroup.Group for _, c := range cases { - c := c if strings.Contains(c.expr, "count_values") && c.steps > 10 { continue // This test is too big to run with -race. 
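For context on the `mergeMaps` signature above, a simplified sketch of its shape: fold `right` into `left` and flag duplicate keys. The real parser records the error via `addParseErrf` and continues; returning it here is an assumption of this sketch.

package main

import "fmt"

func mergeMaps(left, right map[string]any) (map[string]any, error) {
	for key, value := range right {
		if _, ok := left[key]; ok {
			return nil, fmt.Errorf("duplicate key %q in histogram", key)
		}
		left[key] = value
	}
	return left, nil
}

func main() {
	m, err := mergeMaps(map[string]any{"schema": 3}, map[string]any{"sum": 10.5})
	fmt.Println(m, err)
}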
} diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 654a94db35..689e37ee7f 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -219,7 +219,7 @@ func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) } //go:embed testdata var testsFs embed.FS -func raise(line int, format string, v ...interface{}) error { +func raise(line int, format string, v ...any) error { return &parser.ParseErr{ LineOffset: line, Err: fmt.Errorf(format, v...), @@ -1527,7 +1527,7 @@ func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) { func (ll *LazyLoader) parse(input string) error { lines := getLines(input) // Accepts only 'load' command. - for i := 0; i < len(lines); i++ { + for i := range lines { l := lines[i] if len(l) == 0 { continue diff --git a/promql/query_logger.go b/promql/query_logger.go index c0a70b66d7..5923223aa0 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -195,7 +195,7 @@ func newJSONEntry(query string, logger *slog.Logger) []byte { } func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) { - for i := 0; i < maxConcurrent; i++ { + for i := range maxConcurrent { tracker.getNextIndex <- 1 + (i * entrySize) } } diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index eb06e513ef..47a6d1a25d 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -48,7 +48,7 @@ func TestQueryLogging(t *testing.T) { } // Check for inserts of queries. - for i := 0; i < 4; i++ { + for i := range 4 { start := 1 + i*entrySize end := start + entrySize @@ -60,7 +60,7 @@ func TestQueryLogging(t *testing.T) { } // Check if all queries have been deleted. - for i := 0; i < 4; i++ { + for i := range 4 { queryLogger.Delete(1 + i*entrySize) } require.True(t, regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1:1+entrySize*4]), @@ -94,7 +94,7 @@ func TestIndexReuse(t *testing.T) { } // Check all bytes and verify new query was inserted at index 2 - for i := 0; i < 3; i++ { + for i := range 3 { start := 1 + i*entrySize end := start + entrySize diff --git a/promql/value.go b/promql/value.go index c2db0833ee..b909085b17 100644 --- a/promql/value.go +++ b/promql/value.go @@ -45,7 +45,7 @@ func (s String) String() string { } func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V}) + return json.Marshal([...]any{float64(s.T) / 1000, s.V}) } // Scalar is a data point that's explicitly not associated with a metric. @@ -61,7 +61,7 @@ func (s Scalar) String() string { func (s Scalar) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(s.V, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(s.T) / 1000, v}) + return json.Marshal([...]any{float64(s.T) / 1000, v}) } // Series is a stream of data points belonging to a metric. @@ -111,7 +111,7 @@ func (p FPoint) String() string { // timestamp. func (p FPoint) MarshalJSON() ([]byte, error) { v := strconv.FormatFloat(p.F, 'f', -1, 64) - return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) + return json.Marshal([...]any{float64(p.T) / 1000, v}) } // HPoint represents a single histogram data point for a given timestamp. @@ -136,9 +136,9 @@ func (p HPoint) String() string { // timestamp. 
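`String`, `Scalar`, and `FPoint` above all marshal a `[timestamp, value]` pair by handing `json.Marshal` an `[...]any` array literal. The same trick in miniature, with made-up numbers:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	var ts int64 = 1720000000123
	v := strconv.FormatFloat(42.5, 'f', -1, 64) // the value stays a string.
	b, err := json.Marshal([...]any{float64(ts) / 1000, v})
	fmt.Println(string(b), err) // [1720000000.123,"42.5"] <nil>
}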
func (p HPoint) MarshalJSON() ([]byte, error) { h := struct { - Count string `json:"count"` - Sum string `json:"sum"` - Buckets [][]interface{} `json:"buckets,omitempty"` + Count string `json:"count"` + Sum string `json:"sum"` + Buckets [][]any `json:"buckets,omitempty"` }{ Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64), Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64), @@ -161,7 +161,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) { boundaries = 0 // Inclusive only on upper end AKA left open. } } - bucketToMarshal := []interface{}{ + bucketToMarshal := []any{ boundaries, strconv.FormatFloat(bucket.Lower, 'f', -1, 64), strconv.FormatFloat(bucket.Upper, 'f', -1, 64), @@ -169,7 +169,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) { } h.Buckets = append(h.Buckets, bucketToMarshal) } - return json.Marshal([...]interface{}{float64(p.T) / 1000, h}) + return json.Marshal([...]any{float64(p.T) / 1000, h}) } // size returns the size of the HPoint compared to the size of an FPoint. diff --git a/rules/alerting.go b/rules/alerting.go index 77d53395e0..00e457843e 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -596,10 +596,7 @@ func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay if alert.needsSending(ts, resendDelay) { alert.LastSentAt = ts // Allow for two Eval or Alertmanager send failures. - delta := resendDelay - if interval > resendDelay { - delta = interval - } + delta := max(interval, resendDelay) alert.ValidUntil = ts.Add(4 * delta) anew := *alert // The notifier re-uses the labels slice, hence make a copy. diff --git a/rules/group.go b/rules/group.go index d2d4a69fc1..8faf930b77 100644 --- a/rules/group.go +++ b/rules/group.go @@ -17,6 +17,7 @@ import ( "context" "errors" "log/slog" + "maps" "math" "slices" "strings" @@ -215,7 +216,7 @@ func (g *Group) run(ctx context.Context) { return } - ctx = promql.NewOriginContext(ctx, map[string]interface{}{ + ctx = promql.NewOriginContext(ctx, map[string]any{ "ruleGroup": map[string]string{ "file": g.File(), "name": g.Name(), @@ -482,9 +483,7 @@ func (g *Group) CopyState(from *Group) { continue } - for fp, a := range far.active { - ar.active[fp] = a - } + maps.Copy(ar.active, far.active) } // Handle deleted and unmatched duplicate rules. diff --git a/rules/manager.go b/rules/manager.go index b7ae404d8f..2e3c6a7c45 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "log/slog" + maps0 "maps" "net/url" "path/filepath" "slices" @@ -582,9 +583,7 @@ func FromMaps(maps ...map[string]string) labels.Labels { mLables := make(map[string]string) for _, m := range maps { - for k, v := range m { - mLables[k] = v - } + maps0.Copy(mLables, m) } return labels.FromMap(mLables) diff --git a/rules/manager_test.go b/rules/manager_test.go index 9fcdbdc56e..d405075143 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "io/fs" + "maps" "math" "os" "path" @@ -821,9 +822,7 @@ func TestUpdate(t *testing.T) { err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil) require.NoError(t, err) - for h, g := range ruleManager.groups { - ogs[h] = g - } + maps.Copy(ogs, ruleManager.groups) // Update interval and reload. for i, g := range rgs.Groups { @@ -2480,7 +2479,6 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) { // Evaluate groups concurrently (like they normally do). 
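The rules hunks swap hand-written copy loops for `maps.Copy` (Go 1.21). Note the `maps0` alias in `rules/manager.go`: `FromMaps` already names its variadic parameter `maps`, so the import needs another name. A minimal equivalent:

package main

import (
	"fmt"
	maps0 "maps"
)

func fromMaps(maps ...map[string]string) map[string]string {
	merged := make(map[string]string)
	for _, m := range maps {
		maps0.Copy(merged, m) // later maps win on duplicate keys.
	}
	return merged
}

func main() {
	fmt.Println(fromMaps(map[string]string{"a": "1"}, map[string]string{"a": "2", "b": "3"}))
}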
var wg sync.WaitGroup for _, group := range groups { - group := group wg.Add(1) go func() { @@ -2532,7 +2530,7 @@ func TestGroup_Eval_RaceConditionOnStoppingGroupEvaluationWhileRulesAreEvaluated <-ruleManager.block // Update the group a decent number of times to simulate start and stopping in the middle of an evaluation. - for i := 0; i < 10; i++ { + for range 10 { err := ruleManager.Update(time.Second, files, labels.EmptyLabels(), "", nil) require.NoError(t, err) diff --git a/scrape/manager.go b/scrape/manager.go index c2da455858..b4f198ecbf 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -62,7 +62,7 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str graceShut: make(chan struct{}), triggerReload: make(chan struct{}, 1), metrics: sm, - buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), + buffers: pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }), } m.metrics.setTargetMetadataCacheGatherer(m) diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 0636a32a6c..c0535c6d2b 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -594,7 +594,7 @@ func TestManagerTargetsUpdates(t *testing.T) { defer m.Stop() tgSent := make(map[string][]*targetgroup.Group) - for x := 0; x < 10; x++ { + for x := range 10 { tgSent[strconv.Itoa(x)] = []*targetgroup.Group{ { Source: strconv.Itoa(x), @@ -1056,7 +1056,7 @@ scrape_configs: func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() // Check that all metrics can be unregistered, allowing a second manager to be created. - for i := 0; i < 2; i++ { + for range 2 { opts := Options{} manager, err := NewManager(&opts, nil, nil, nil, reg) require.NotNil(t, manager) diff --git a/scrape/scrape.go b/scrape/scrape.go index 95dbdf8a8e..5e52528a2f 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1262,7 +1262,7 @@ func newScrapeLoop(ctx context.Context, l = promslog.NewNopLogger() } if buffers == nil { - buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) }) } if cache == nil { cache = newScrapeCache(metrics) diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 870f5e85f6..955fbcdace 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "maps" "math" "net/http" "net/http/httptest" @@ -459,7 +460,7 @@ func TestScrapePoolStop(t *testing.T) { // clean them and the respective targets up. It must wait until each loop's // stop function returned before returning itself. - for i := 0; i < numTargets; i++ { + for i := range numTargets { t := &Target{ labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)), scrapeConfig: &config.ScrapeConfig{}, @@ -547,7 +548,7 @@ func TestScrapePoolReload(t *testing.T) { // loops and start new ones. A new loop must not be started before the preceding // one terminated. 
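The `pool.New` callbacks above belong to Prometheus's own bucketed byte-slice pool, so only the callback's signature changes here. The idiom it mirrors is the stdlib's `sync.Pool`, sketched below under that assumption:

package main

import (
	"fmt"
	"sync"
)

func main() {
	bufPool := sync.Pool{
		New: func() any {
			b := make([]byte, 0, 1024) // start with a 1 KiB buffer.
			return &b
		},
	}
	b := bufPool.Get().(*[]byte) // callers type-assert what they Get.
	*b = append(*b, "scraped bytes"...)
	fmt.Println(len(*b)) // 13
	*b = (*b)[:0]        // reset before returning to the pool.
	bufPool.Put(b)
}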
- for i := 0; i < numTargets; i++ { + for i := range numTargets { labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)) t := &Target{ labels: labels, @@ -569,9 +570,7 @@ func TestScrapePoolReload(t *testing.T) { done := make(chan struct{}) beforeTargets := map[uint64]*Target{} - for h, t := range sp.activeTargets { - beforeTargets[h] = t - } + maps.Copy(beforeTargets, sp.activeTargets) reloadTime := time.Now() @@ -691,7 +690,7 @@ func TestScrapePoolTargetLimit(t *testing.T) { } tgs := []*targetgroup.Group{} - for i := 0; i < 50; i++ { + for i := range 50 { tgs = append(tgs, &targetgroup.Group{ Targets: []model.LabelSet{ @@ -904,7 +903,7 @@ func TestScrapePoolRaces(t *testing.T) { require.Len(t, active, expectedActive, "Invalid number of active targets") require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets") - for i := 0; i < 20; i++ { + for range 20 { time.Sleep(10 * time.Millisecond) sp.reload(newConfig()) } @@ -1437,7 +1436,7 @@ func makeTestGauges(n int) []byte { sb := bytes.Buffer{} fmt.Fprintf(&sb, "# TYPE metric_a gauge\n") fmt.Fprintf(&sb, "# HELP metric_a help text\n") - for i := 0; i < n; i++ { + for i := range n { fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100) } fmt.Fprintf(&sb, "# EOF\n") @@ -1817,7 +1816,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { numScrapes++ if numScrapes < 5 { s := "" - for i := 0; i < 500; i++ { + for i := range 500 { s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes) } w.Write([]byte(s + "&")) @@ -1929,7 +1928,7 @@ func TestScrapeLoopAppend(t *testing.T) { } } -func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { +func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) { t.Helper() testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.Comparer(equalFloatSamples), cmp.AllowUnexported(histogramSample{})}, @@ -3894,7 +3893,7 @@ func TestReuseCacheRace(t *testing.T) { MetricNameValidationScheme: model.UTF8Validation, MetricNameEscapingScheme: model.AllowUTF8, } - buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + buffers = pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }) sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t)) t1 = &Target{ labels: labels.FromStrings("labelNew", "nameNew"), @@ -4357,7 +4356,7 @@ func TestConvertClassicHistogramsToNHCB(t *testing.T) { `, name, value) } genTestHistText := func(name string, withMetadata bool) string { - data := map[string]interface{}{ + data := map[string]any{ "name": name, } b := &bytes.Buffer{} @@ -5145,7 +5144,7 @@ func BenchmarkTargetScraperGzip(b *testing.B) { {metricsCount: 100000}, } - for i := 0; i < len(scenarios); i++ { + for i := range scenarios { var buf bytes.Buffer var name string gw := gzip.NewWriter(&buf) @@ -5266,7 +5265,6 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) { }, } for name, tc := range testcases { - tc := tc t.Run(name, func(t *testing.T) { t.Parallel() testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema) diff --git a/scrape/target_test.go b/scrape/target_test.go index 85f2280c41..8d59b2573f 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -489,7 +489,7 @@ scrape_configs: for _, nTargets := range []int{1, 10, 100} { b.Run(fmt.Sprintf("%d_targets", nTargets), func(b *testing.B) { targets := []model.LabelSet{} - for i := 0; i < nTargets; i++ { + for i := range nTargets { 
labels := model.LabelSet{ model.AddressLabel: model.LabelValue(fmt.Sprintf("localhost:%d", i)), "__meta_kubernetes_namespace": "some_namespace", @@ -501,7 +501,7 @@ scrape_configs: "__meta_kubernetes_pod_phase": "Running", } // Add some more labels, because Kubernetes SD generates a lot - for i := 0; i < 10; i++ { + for i := range 10 { labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_label_extra%d", i))] = "a_label_abcdefgh" labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_labelpresent_extra%d", i))] = "true" } diff --git a/storage/merge.go b/storage/merge.go index 9b3bcee580..f8ba1ab76a 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -233,10 +233,7 @@ func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *Label } func mergeStrings(a, b []string) []string { - maxl := len(a) - if len(b) > len(a) { - maxl = len(b) - } + maxl := max(len(b), len(a)) res := make([]string, 0, maxl*10/9) for len(a) > 0 && len(b) > 0 { @@ -440,11 +437,11 @@ func (h genericSeriesSetHeap) Less(i, j int) bool { return labels.Compare(a, b) < 0 } -func (h *genericSeriesSetHeap) Push(x interface{}) { +func (h *genericSeriesSetHeap) Push(x any) { *h = append(*h, x.(genericSeriesSet)) } -func (h *genericSeriesSetHeap) Pop() interface{} { +func (h *genericSeriesSetHeap) Pop() any { old := *h n := len(old) x := old[n-1] @@ -698,11 +695,11 @@ func (h samplesIteratorHeap) Less(i, j int) bool { return h[i].AtT() < h[j].AtT() } -func (h *samplesIteratorHeap) Push(x interface{}) { +func (h *samplesIteratorHeap) Push(x any) { *h = append(*h, x.(chunkenc.Iterator)) } -func (h *samplesIteratorHeap) Pop() interface{} { +func (h *samplesIteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] @@ -846,11 +843,11 @@ func (h chunkIteratorHeap) Less(i, j int) bool { return at.MinTime < bt.MinTime } -func (h *chunkIteratorHeap) Push(x interface{}) { +func (h *chunkIteratorHeap) Push(x any) { *h = append(*h, x.(chunks.Iterator)) } -func (h *chunkIteratorHeap) Pop() interface{} { +func (h *chunkIteratorHeap) Pop() any { old := *h n := len(old) x := old[n-1] diff --git a/storage/merge_test.go b/storage/merge_test.go index 151c075117..ff00daf238 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1329,10 +1329,10 @@ func TestChainSampleIteratorSeekHistogramCounterResetHint(t *testing.T) { func makeSeries(numSeries, numSamples int) []Series { series := []Series{} - for j := 0; j < numSeries; j++ { + for j := range numSeries { labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j)) samples := []chunks.Sample{} - for k := 0; k < numSamples; k++ { + for k := range numSamples { samples = append(samples, fSample{t: int64(k), f: float64(k)}) } series = append(series, NewListSeries(labels, samples)) @@ -1393,9 +1393,9 @@ func BenchmarkMergeSeriesSet(b *testing.B) { func BenchmarkMergeLabelValuesWithLimit(b *testing.B) { var queriers []genericQuerier - for i := 0; i < 5; i++ { + for i := range 5 { var lbls []string - for j := 0; j < 100000; j++ { + for j := range 100000 { lbls = append(lbls, fmt.Sprintf("querier_%d_label_%d", i, j)) } q := &mockQuerier{resp: lbls} @@ -1680,7 +1680,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { } // Check slice but ignore difference between nil and empty. -func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) { +func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...any) { if len(a) == 0 { require.Empty(t, b, msgAndArgs...) 
} else { diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index ef4d6bb424..ea2a816d94 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -215,7 +215,7 @@ func (c *AzureADConfig) Validate() error { } // UnmarshalYAML unmarshal the Azure AD config yaml. -func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (c *AzureADConfig) UnmarshalYAML(unmarshal func(any) error) error { type plain AzureADConfig *c = AzureADConfig{} if err := unmarshal((*plain)(c)); err != nil { diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index 64524f8c93..7fb670a24d 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -1017,7 +1017,7 @@ func createSampledResponseHandler(t *testing.T, queries []*prompb.Query) http.Ha var timeseries []*prompb.TimeSeries // Create 2 series per query - for seriesIndex := 0; seriesIndex < 2; seriesIndex++ { + for seriesIndex := range 2 { var labels []prompb.Label if queryIndex == 0 { labels = []prompb.Label{ diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 0e650a6e85..cb54463796 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -537,7 +537,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) { require.Equal(t, expected, fh) // Keep calling Next() until the end. - for i := 0; i < 3; i++ { + for range 3 { require.Equal(t, chunkenc.ValHistogram, it.Next()) } @@ -1025,7 +1025,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk { time := startTime - for i := 0; i < numTestChunks; i++ { + for i := range numTestChunks { c := chunkenc.NewXORChunk() a, err := c.Appender() @@ -1033,7 +1033,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk { minTimeMs := time - for j := 0; j < numSamplesPerTestChunk; j++ { + for j := range numSamplesPerTestChunk { a.Append(time, float64(i+j)) time += int64(1000) } diff --git a/storage/remote/dial_context_test.go b/storage/remote/dial_context_test.go index d2716df53f..5a0cd7c88c 100644 --- a/storage/remote/dial_context_test.go +++ b/storage/remote/dial_context_test.go @@ -151,7 +151,7 @@ func TestDialContextWithRandomConnections(t *testing.T) { t.Run(name, func(t *testing.T) { dc := tc.setup() require.NotNil(t, dc) - for i := 0; i < numberOfRuns; i++ { + for range numberOfRuns { _, err := dc.dialContextFn()(context.Background(), testNetwork, tc.addr) require.NoError(t, err) } diff --git a/storage/remote/intern_test.go b/storage/remote/intern_test.go index 2b6a592ad7..f992b2ada6 100644 --- a/storage/remote/intern_test.go +++ b/storage/remote/intern_test.go @@ -75,7 +75,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) { require.True(t, ok) require.Equal(t, int64(1), interned.refs.Load(), "wrong interned refs count") - for i := 0; i < 1000; i++ { + for range 1000 { released := make(chan struct{}) go func() { interner.release(testString) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/context_test.go b/storage/remote/otlptranslator/prometheusremotewrite/context_test.go index 94b23be04f..4b47964313 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/context_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/context_test.go @@ -26,12 +26,12 @@ func TestEveryNTimes(t *testing.T) { n: n, } - for i := 0; i < n; i++ { + for range n { require.NoError(t, e.checkContext(ctx)) } cancel() - for i := 0; i < n-1; i++ { + for range n - 1 { require.NoError(t, e.checkContext(ctx)) } 
require.EqualError(t, e.checkContext(ctx), context.Canceled.Error()) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index f4199fd1c2..b6e9ab7d70 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -201,7 +201,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust Length: 0, }) - for i := 0; i < numBuckets; i++ { + for i := range numBuckets { nextBucketIdx := (int32(i)+offset)>>scaleDown + 1 if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. count += int64(bucketCounts[i]) @@ -224,7 +224,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. - for j := int32(0); j < gap; j++ { + for range gap { appendDelta(0) } } @@ -246,7 +246,7 @@ func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjust } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. - for j := int32(0); j < gap; j++ { + for range gap { appendDelta(0) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index 2a34dcb3c5..9addcb70cb 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -399,7 +399,7 @@ func BenchmarkConvertBucketLayout(b *testing.B) { for _, scenario := range scenarios { buckets := pmetric.NewExponentialHistogramDataPointBuckets() buckets.SetOffset(0) - for i := 0; i < 1000; i++ { + for i := range 1000 { if i%(scenario.gap+1) == 0 { buckets.BucketCounts().Append(10) } else { @@ -987,7 +987,7 @@ func BenchmarkConvertHistogramBucketsToNHCBLayout(b *testing.B) { for _, scenario := range scenarios { var buckets []uint64 - for i := 0; i < 1000; i++ { + for i := range 1000 { if i%(scenario.gap+1) == 0 { buckets = append(buckets, uint64(10)) } else { diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index e8f1d692ec..7cebdc3e37 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -557,11 +557,8 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr pBuf := proto.NewBuffer(nil) numSends := int(math.Ceil(float64(len(metadata)) / float64(t.mcfg.MaxSamplesPerSend))) - for i := 0; i < numSends; i++ { - last := (i + 1) * t.mcfg.MaxSamplesPerSend - if last > len(metadata) { - last = len(metadata) - } + for i := range numSends { + last := min((i+1)*t.mcfg.MaxSamplesPerSend, len(metadata)) err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) @@ -1241,7 +1238,7 @@ func (s *shards) start(n int) { s.qm.metrics.numShards.Set(float64(n)) newQueues := make([]*queue, n) - for i := 0; i < n; i++ { + for i := range n { newQueues[i] = newQueue(s.qm.cfg.MaxSamplesPerSend, s.qm.cfg.Capacity) } @@ -1259,7 +1256,7 @@ func (s *shards) start(n int) { s.exemplarsDroppedOnHardShutdown.Store(0) s.histogramsDroppedOnHardShutdown.Store(0) s.metadataDroppedOnHardShutdown.Store(0) - for i := 0; i < n; i++ { + for i := range n { go s.runShard(hardShutdownCtx, i, newQueues[i]) } } @@ 
-2028,11 +2025,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt onRetry() t.logger.Warn("Failed to send batch, retrying", "err", err) - backoff = sleepDuration * 2 - - if backoff > t.cfg.MaxBackoff { - backoff = t.cfg.MaxBackoff - } + backoff = min(sleepDuration*2, t.cfg.MaxBackoff) try++ } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 5ac2d14c94..7708f6911b 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -341,7 +341,7 @@ func TestMetadataDelivery(t *testing.T) { metadata := []scrape.MetricMetadata{} numMetadata := 1532 - for i := 0; i < numMetadata; i++ { + for i := range numMetadata { metadata = append(metadata, scrape.MetricMetadata{ MetricFamily: "prometheus_remote_storage_sent_metadata_bytes_" + strconv.Itoa(i), Type: model.MetricTypeCounter, @@ -439,7 +439,7 @@ func TestSampleDeliveryOrder(t *testing.T) { n := config.DefaultQueueConfig.MaxSamplesPerSend * ts samples := make([]record.RefSample, 0, n) series := make([]record.RefSeries, 0, n) - for i := 0; i < n; i++ { + for i := range n { name := fmt.Sprintf("test_metric_%d", i%ts) samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(i), @@ -509,9 +509,9 @@ func TestSeriesReset(t *testing.T) { cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig m := newTestQueueManager(t, cfg, mcfg, deadline, c, config.RemoteWriteProtoMsgV1) - for i := 0; i < numSegments; i++ { + for i := range numSegments { series := []record.RefSeries{} - for j := 0; j < numSeries; j++ { + for j := range numSeries { series = append(series, record.RefSeries{Ref: chunks.HeadSeriesRef((i * 100) + j), Labels: labels.FromStrings("a", "a")}) } m.StoreSeries(series, i) @@ -619,7 +619,7 @@ func TestReshardPartialBatch(t *testing.T) { m.Start() - for i := 0; i < 100; i++ { + for range 100 { done := make(chan struct{}) go func() { m.Append(samples) @@ -666,7 +666,7 @@ func TestQueueFilledDeadlock(t *testing.T) { m.Start() defer m.Stop() - for i := 0; i < 100; i++ { + for range 100 { done := make(chan struct{}) go func() { time.Sleep(batchSendDeadline) @@ -820,9 +820,9 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([ samples := make([]record.RefSample, 0, numSamples) series := make([]record.RefSeries, 0, numSeries) lb := labels.NewScratchBuilder(1 + len(extraLabels)) - for i := 0; i < numSeries; i++ { + for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) - for j := 0; j < numSamples; j++ { + for j := range numSamples { samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(i), T: int64(j), @@ -851,7 +851,7 @@ func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) [ samples := make([]prompb.TimeSeries, numSamples) // use a fixed rand source so tests are consistent r := rand.New(rand.NewSource(99)) - for j := int64(0); j < numSamples; j++ { + for j := range numSamples { name := fmt.Sprintf("test_metric_%d", j) samples[j] = prompb.TimeSeries{ @@ -874,9 +874,9 @@ func createProtoTimeseriesWithOld(numSamples, baseTs int64, _ ...labels.Label) [ func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) { exemplars := make([]record.RefExemplar, 0, numExemplars) series := make([]record.RefSeries, 0, numSeries) - for i := 0; i < numSeries; i++ { + for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) - for j := 0; j < numExemplars; j++ { + for j := range numExemplars { e := 
record.RefExemplar{ Ref: chunks.HeadSeriesRef(i), T: int64(j), @@ -897,9 +897,9 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record. histograms := make([]record.RefHistogramSample, 0, numSamples) floatHistograms := make([]record.RefFloatHistogramSample, 0, numSamples) series := make([]record.RefSeries, 0, numSeries) - for i := 0; i < numSeries; i++ { + for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) - for j := 0; j < numSamples; j++ { + for j := range numSamples { hist := &histogram.Histogram{ Schema: 2, ZeroThreshold: 1e-128, @@ -1594,11 +1594,9 @@ func TestCalculateDesiredShards(t *testing.T) { sin := inputRate * int64(shardUpdateDuration/time.Second) addSamples(sin, ts) - sout := int64(m.numShards*cfg.MaxSamplesPerSend) * int64(shardUpdateDuration/(100*time.Millisecond)) - // You can't send samples that don't exist so cap at the number of pending samples. - if sout > pendingSamples { - sout = pendingSamples - } + sout := min( + // You can't send samples that don't exist so cap at the number of pending samples. + int64(m.numShards*cfg.MaxSamplesPerSend)*int64(shardUpdateDuration/(100*time.Millisecond)), pendingSamples) sendSamples(sout, ts) t.Log("desiredShards", m.numShards, "pendingSamples", pendingSamples) @@ -1868,7 +1866,7 @@ func createDummyTimeSeries(instances int) []timeSeries { var result []timeSeries r := rand.New(rand.NewSource(0)) - for i := 0; i < instances; i++ { + for i := range instances { b := labels.NewBuilder(commonLabels) b.Set("pod", "prometheus-"+strconv.Itoa(i)) for _, lbls := range metrics { @@ -2070,7 +2068,7 @@ func createTimeseriesWithRandomLabelCount(id string, seriesCount int, timeAdd ti series := []record.RefSeries{} // use a fixed rand source so tests are consistent r := rand.New(rand.NewSource(99)) - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { s := record.RefSample{ Ref: chunks.HeadSeriesRef(i), T: time.Now().Add(timeAdd).UnixMilli(), @@ -2098,7 +2096,7 @@ func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...la samples := make([]record.RefSample, 0, numSamples) series := make([]record.RefSeries, 0, numSeries) lb := labels.NewScratchBuilder(1 + len(extraLabels)) - for i := 0; i < numSeries; i++ { + for i := range numSeries { name := fmt.Sprintf("test_metric_%d", i) // We create half of the samples in the past. past := timestamp.FromTime(time.Now().Add(-5 * time.Minute)) diff --git a/storage/remote/storage.go b/storage/remote/storage.go index d22bbacae4..3326d3bcd9 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -219,7 +219,7 @@ func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher { } // Used for hashing configs and diff'ing hashes in ApplyConfig. 
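The `sendWriteRequestWithBackoff` hunk above caps the doubling backoff with `min`. A standalone sketch of that capped exponential backoff, with invented durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	backoff := 100 * time.Millisecond
	maxBackoff := time.Second
	for try := 0; try < 6; try++ {
		fmt.Println(try, backoff) // 100ms, 200ms, 400ms, 800ms, 1s, 1s
		backoff = min(backoff*2, maxBackoff)
	}
}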
-func toHash(data interface{}) (string, error) { +func toHash(data any) (string, error) { bytes, err := yaml.Marshal(data) if err != nil { return "", err diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go index a62cd2da39..770a5df594 100644 --- a/storage/remote/storage_test.go +++ b/storage/remote/storage_test.go @@ -160,7 +160,7 @@ func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) { wg.Add(2000) start := make(chan struct{}) - for i := 0; i < 1000; i++ { + for i := range 1000 { go func(i int) { <-start conf := &config.Config{ @@ -174,7 +174,7 @@ func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) { }(i) } - for i := 0; i < 1000; i++ { + for range 1000 { go func() { <-start s.Notify() diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 1ae71f2bba..48b8a377cd 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -840,14 +840,14 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { require.Equal(b, uint64(1000), db.Head().NumSeries()) var bufRequests [][]byte - for i := 0; i < 100; i++ { + for i := range 100 { buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy") require.NoError(b, err) bufRequests = append(bufRequests, buf) } b.ResetTimer() - for i := 0; i < 100; i++ { + for i := range 100 { req, err = http.NewRequest("", "", bytes.NewReader(bufRequests[i])) require.NoError(b, err) @@ -860,7 +860,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) { func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries { var series []prompb.TimeSeries - for i := 0; i < numSeries; i++ { + for i := range numSeries { s := prompb.TimeSeries{ Labels: []prompb.Label{{Name: "__name__", Value: fmt.Sprintf("test_metric_%d", i)}}, Samples: []prompb.Sample{{Value: float64(i), Timestamp: ts}}, @@ -916,7 +916,7 @@ type mockMetadata struct { } // Wrapper to instruct go-cmp package to compare a list of structs with unexported fields. -func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) { +func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) { t.Helper() testutil.RequireEqualWithOptions(t, expected, actual, diff --git a/template/template.go b/template/template.go index 87ca32b346..295380cc83 100644 --- a/template/template.go +++ b/template/template.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" html_template "html/template" + "maps" "math" "net" "net/url" @@ -59,7 +60,7 @@ func init() { // A version of vector that's easier to use from templates. 
type sample struct { Labels map[string]string - Value interface{} + Value any } type queryResult []*sample @@ -110,7 +111,7 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer type Expander struct { text string name string - data interface{} + data any funcMap text_template.FuncMap options []string } @@ -120,7 +121,7 @@ func NewTemplateExpander( ctx context.Context, text string, name string, - data interface{}, + data any, timestamp model.Time, queryFunc QueryFunc, externalURL *url.URL, @@ -146,14 +147,14 @@ func NewTemplateExpander( "label": func(label string, s *sample) string { return s.Labels[label] }, - "value": func(s *sample) interface{} { + "value": func(s *sample) any { return s.Value }, "strvalue": func(s *sample) string { return s.Labels["__value__"] }, - "args": func(args ...interface{}) map[string]interface{} { - result := make(map[string]interface{}) + "args": func(args ...any) map[string]any { + result := make(map[string]any) for i, a := range args { result[fmt.Sprintf("arg%d", i)] = a } @@ -199,7 +200,7 @@ func NewTemplateExpander( } return host }, - "humanize": func(i interface{}) (string, error) { + "humanize": func(i any) (string, error) { v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err @@ -228,7 +229,7 @@ func NewTemplateExpander( } return fmt.Sprintf("%.4g%s", v, prefix), nil }, - "humanize1024": func(i interface{}) (string, error) { + "humanize1024": func(i any) (string, error) { v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err @@ -247,7 +248,7 @@ func NewTemplateExpander( return fmt.Sprintf("%.4g%s", v, prefix), nil }, "humanizeDuration": common_templates.HumanizeDuration, - "humanizePercentage": func(i interface{}) (string, error) { + "humanizePercentage": func(i any) (string, error) { v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err @@ -255,7 +256,7 @@ func NewTemplateExpander( return fmt.Sprintf("%.4g%%", v*100), nil }, "humanizeTimestamp": common_templates.HumanizeTimestamp, - "toTime": func(i interface{}) (*time.Time, error) { + "toTime": func(i any) (*time.Time, error) { v, err := common_templates.ConvertToFloat(i) if err != nil { return nil, err @@ -263,7 +264,7 @@ func NewTemplateExpander( return floatToTime(v) }, - "toDuration": func(i interface{}) (*time.Duration, error) { + "toDuration": func(i any) (*time.Duration, error) { v, err := common_templates.ConvertToFloat(i) if err != nil { return nil, err @@ -293,12 +294,12 @@ func NewTemplateExpander( } // AlertTemplateData returns the interface to be used in expanding the template. -func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, smpl promql.Sample) interface{} { +func AlertTemplateData(labels, externalLabels map[string]string, externalURL string, smpl promql.Sample) any { res := struct { Labels map[string]string ExternalLabels map[string]string ExternalURL string - Value interface{} + Value any }{ Labels: labels, ExternalLabels: externalLabels, @@ -316,9 +317,7 @@ func AlertTemplateData(labels, externalLabels map[string]string, externalURL str // Funcs adds the functions in fm to the Expander's function map. // Existing functions will be overwritten in case of conflict. func (te Expander) Funcs(fm text_template.FuncMap) { - for k, v := range fm { - te.funcMap[k] = v - } + maps.Copy(te.funcMap, fm) } // Expand expands a template in text (non-HTML) mode. 
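// Illustrative sketch (not part of this patch): the hunk above replaces
// Expander.Funcs's hand-written key-by-key copy loop with maps.Copy from the
// standard "maps" package (Go 1.21+). maps.Copy overwrites dst entries on a
// key conflict, which preserves the documented "existing functions will be
// overwritten in case of conflict" behavior. A self-contained example of the
// same pattern, using text/template.FuncMap as Funcs does:
package main

import (
	"fmt"
	"maps"
	"strings"
	text_template "text/template"
)

func main() {
	// Base function map, as an Expander-like type might hold.
	base := text_template.FuncMap{"trim": strings.TrimSpace}
	// Incoming additions; "trim" collides and the incoming entry wins.
	extra := text_template.FuncMap{"trim": strings.TrimSpace, "upper": strings.ToUpper}
	maps.Copy(base, extra) // equivalent to the removed for-range copy loop
	fmt.Println(len(base)) // 2
}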
@@ -369,7 +368,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap)) tmpl.Option(te.options...) tmpl.Funcs(html_template.FuncMap{ - "tmpl": func(name string, data interface{}) (html_template.HTML, error) { + "tmpl": func(name string, data any) (html_template.HTML, error) { var buffer bytes.Buffer err := tmpl.ExecuteTemplate(&buffer, name, data) return html_template.HTML(buffer.String()), err diff --git a/template/template_test.go b/template/template_test.go index 7b89f3e30a..f3348caae6 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -590,7 +590,7 @@ func TestTemplateExpansion(t *testing.T) { type scenario struct { text string output string - input interface{} + input any options []string queryResult promql.Vector shouldFail bool diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 4e53b6168b..36324c0ae6 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -297,11 +297,11 @@ func Open(l *slog.Logger, reg prometheus.Registerer, rs *remote.Storage, dir str metrics: newDBMetrics(reg), } - db.bufPool.New = func() interface{} { + db.bufPool.New = func() any { return make([]byte, 0, 1024) } - db.appenderPool.New = func() interface{} { + db.appenderPool.New = func() any { return &appender{ DB: db, pendingSeries: make([]record.RefSeries, 0, 100), @@ -440,7 +440,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H dec = record.NewDecoder(syms) lastRef = chunks.HeadSeriesRef(db.nextRef.Load()) - decoded = make(chan interface{}, 10) + decoded = make(chan any, 10) errCh = make(chan error, 1) ) diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go index 0cd780677e..bd3fba5c11 100644 --- a/tsdb/agent/db_test.go +++ b/tsdb/agent/db_test.go @@ -135,7 +135,7 @@ func TestCommit(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { sample := chunks.GenerateSamples(0, 1) ref, err := app.Append(0, lset, sample[0].T(), sample[0].F()) require.NoError(t, err) @@ -157,7 +157,7 @@ func TestCommit(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) require.NoError(t, err) } @@ -169,7 +169,7 @@ func TestCommit(t *testing.T) { customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), customBucketHistograms[i], nil) require.NoError(t, err) } @@ -181,7 +181,7 @@ func TestCommit(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) require.NoError(t, err) } @@ -193,7 +193,7 @@ func TestCommit(t *testing.T) { customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), nil, customBucketFloatHistograms[i]) require.NoError(t, err) } @@ -274,7 +274,7 @@ func TestRollback(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) 
- for i := 0; i < numDatapoints; i++ { + for range numDatapoints { sample := chunks.GenerateSamples(0, 1) _, err := app.Append(0, lset, sample[0].T(), sample[0].F()) require.NoError(t, err) @@ -287,7 +287,7 @@ func TestRollback(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) require.NoError(t, err) } @@ -299,7 +299,7 @@ func TestRollback(t *testing.T) { histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) require.NoError(t, err) } @@ -311,7 +311,7 @@ func TestRollback(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) require.NoError(t, err) } @@ -323,7 +323,7 @@ func TestRollback(t *testing.T) { floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) require.NoError(t, err) } @@ -415,7 +415,7 @@ func TestFullTruncateWAL(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) - for i := 0; i < numDatapoints; i++ { + for range numDatapoints { _, err := app.Append(0, lset, int64(lastTs), 0) require.NoError(t, err) } @@ -428,7 +428,7 @@ func TestFullTruncateWAL(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil) require.NoError(t, err) } @@ -441,7 +441,7 @@ func TestFullTruncateWAL(t *testing.T) { histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil) require.NoError(t, err) } @@ -454,7 +454,7 @@ func TestFullTruncateWAL(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i]) require.NoError(t, err) } @@ -467,7 +467,7 @@ func TestFullTruncateWAL(t *testing.T) { floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i]) require.NoError(t, err) } @@ -502,7 +502,7 @@ func TestPartialTruncateWAL(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) 
- for i := 0; i < numDatapoints; i++ { + for range numDatapoints { _, err := app.Append(0, lset, lastTs, 0) require.NoError(t, err) } @@ -515,7 +515,7 @@ func TestPartialTruncateWAL(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) require.NoError(t, err) } @@ -528,7 +528,7 @@ func TestPartialTruncateWAL(t *testing.T) { histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) require.NoError(t, err) } @@ -541,7 +541,7 @@ func TestPartialTruncateWAL(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) require.NoError(t, err) } @@ -554,7 +554,7 @@ func TestPartialTruncateWAL(t *testing.T) { floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) require.NoError(t, err) } @@ -567,7 +567,7 @@ func TestPartialTruncateWAL(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) - for i := 0; i < numDatapoints; i++ { + for range numDatapoints { _, err := app.Append(0, lset, lastTs, 0) require.NoError(t, err) } @@ -580,7 +580,7 @@ func TestPartialTruncateWAL(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) require.NoError(t, err) } @@ -593,7 +593,7 @@ func TestPartialTruncateWAL(t *testing.T) { histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) require.NoError(t, err) } @@ -606,7 +606,7 @@ func TestPartialTruncateWAL(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) require.NoError(t, err) } @@ -619,7 +619,7 @@ func TestPartialTruncateWAL(t *testing.T) { floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) require.NoError(t, err) } @@ -649,7 +649,7 @@ func TestWALReplay(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) 
- for i := 0; i < numDatapoints; i++ { + for range numDatapoints { _, err := app.Append(0, lset, lastTs, 0) require.NoError(t, err) } @@ -661,7 +661,7 @@ func TestWALReplay(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) require.NoError(t, err) } @@ -673,7 +673,7 @@ func TestWALReplay(t *testing.T) { histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil) require.NoError(t, err) } @@ -685,7 +685,7 @@ func TestWALReplay(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) require.NoError(t, err) } @@ -697,7 +697,7 @@ func TestWALReplay(t *testing.T) { floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { _, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i]) require.NoError(t, err) } @@ -725,7 +725,7 @@ func TestWALReplay(t *testing.T) { // Check if lastTs of the samples retrieved from the WAL is retained. metrics := replayStorage.series.series - for i := 0; i < len(metrics); i++ { + for i := range metrics { mp := metrics[i] for _, v := range mp { require.Equal(t, v.lastTs, int64(lastTs)) @@ -769,7 +769,7 @@ func Test_ExistingWAL_NextRef(t *testing.T) { // Append series app := db.Appender(context.Background()) - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("series_%d", i)) _, err := app.Append(0, lset, 0, 100) require.NoError(t, err) @@ -778,7 +778,7 @@ func Test_ExistingWAL_NextRef(t *testing.T) { histogramCount := 10 histograms := tsdbutil.GenerateTestHistograms(histogramCount) // Append series - for i := 0; i < histogramCount; i++ { + for i := range histogramCount { lset := labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("histogram_%d", i)) _, err := app.AppendHistogram(0, lset, 0, histograms[i], nil) require.NoError(t, err) @@ -826,7 +826,7 @@ func startTime() (int64, error) { func labelsForTest(lName string, seriesCount int) [][]labels.Label { var series [][]labels.Label - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { lset := []labels.Label{ {Name: "a", Value: lName}, {Name: "instance", Value: "localhost" + strconv.Itoa(i)}, @@ -1014,7 +1014,7 @@ func TestDBAllowOOOSamples(t *testing.T) { for _, l := range lbls { lset := labels.New(l...) 
- for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { ref, err := app.Append(0, lset, int64(i), float64(i)) require.NoError(t, err) @@ -1035,7 +1035,7 @@ func TestDBAllowOOOSamples(t *testing.T) { histograms := tsdbutil.GenerateTestHistograms(numHistograms) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) require.NoError(t, err) } @@ -1047,7 +1047,7 @@ func TestDBAllowOOOSamples(t *testing.T) { histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil) require.NoError(t, err) } @@ -1059,7 +1059,7 @@ func TestDBAllowOOOSamples(t *testing.T) { floatHistograms := tsdbutil.GenerateTestFloatHistograms(numHistograms) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) require.NoError(t, err) } @@ -1071,7 +1071,7 @@ func TestDBAllowOOOSamples(t *testing.T) { floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms) - for i := 0; i < numDatapoints; i++ { + for i := range numDatapoints { _, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i]) require.NoError(t, err) } diff --git a/tsdb/agent/series_test.go b/tsdb/agent/series_test.go index f1123480ba..036a80de4c 100644 --- a/tsdb/agent/series_test.go +++ b/tsdb/agent/series_test.go @@ -36,7 +36,7 @@ func TestNoDeadlock(t *testing.T) { ) wg.Add(numWorkers) - for i := 0; i < numWorkers; i++ { + for range numWorkers { go func() { defer wg.Done() <-started @@ -45,7 +45,7 @@ func TestNoDeadlock(t *testing.T) { } wg.Add(numWorkers) - for i := 0; i < numWorkers; i++ { + for i := range numWorkers { go func(i int) { defer wg.Done() <-started diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 652ccda1fe..bc869c670f 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -232,7 +232,7 @@ func TestLabelValuesWithMatchers(t *testing.T) { ctx := context.Background() var seriesEntries []storage.Series - for i := 0; i < 100; i++ { + for i := range 100 { seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( "tens", fmt.Sprintf("value%d", i/10), "unique", fmt.Sprintf("value%d", i), @@ -254,7 +254,7 @@ func TestLabelValuesWithMatchers(t *testing.T) { defer func() { require.NoError(t, indexReader.Close()) }() var uniqueWithout30s []string - for i := 0; i < 100; i++ { + for i := range 100 { if i/10 != 3 { uniqueWithout30s = append(uniqueWithout30s, fmt.Sprintf("value%d", i)) } @@ -429,7 +429,7 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) { var seriesEntries []storage.Series metricCount := 1000000 - for i := 0; i < metricCount; i++ { + for i := range metricCount { // Note these series are not created in sort order: 'value2' sorts after 'value10'. // This makes a big difference to the benchmark timing. 
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( @@ -470,7 +470,7 @@ func TestLabelNamesWithMatchers(t *testing.T) { ctx := context.Background() var seriesEntries []storage.Series - for i := 0; i < 100; i++ { + for i := range 100 { seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings( "unique", fmt.Sprintf("value%d", i), ), []chunks.Sample{sample{100, 0, nil, nil}})) @@ -856,7 +856,7 @@ func genSeriesFromSampleGenerator(totalSeries, labelCount int, mint, maxt, step series := make([]storage.Series, totalSeries) - for i := 0; i < totalSeries; i++ { + for i := range totalSeries { lbls := make(map[string]string, labelCount) lbls[defaultLabelName] = strconv.Itoa(i) for j := 1; len(lbls) < labelCount; j++ { diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index 1520619f4b..8cccb189fa 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -288,17 +288,17 @@ type pool struct { func NewPool() Pool { return &pool{ xor: sync.Pool{ - New: func() interface{} { + New: func() any { return &XORChunk{b: bstream{}} }, }, histogram: sync.Pool{ - New: func() interface{} { + New: func() any { return &HistogramChunk{b: bstream{}} }, }, floatHistogram: sync.Pool{ - New: func() interface{} { + New: func() any { return &FloatHistogramChunk{b: bstream{}} }, }, diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index a5e75ca32b..f72c439b42 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -50,7 +50,7 @@ func testChunk(t *testing.T, c Chunk) { ts = int64(1234123324) v = 1243535.123 ) - for i := 0; i < 300; i++ { + for i := range 300 { ts += int64(rand.Intn(10000) + 1) if i%2 == 0 { v += float64(rand.Intn(1000000)) @@ -207,7 +207,7 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) { v = 1243535.123 exp []pair ) - for i := 0; i < samplesPerChunk; i++ { + for range samplesPerChunk { // t += int64(rand.Intn(10000) + 1) t += int64(1000) // v = rand.Float64() @@ -287,7 +287,7 @@ func benchmarkAppender(b *testing.B, deltas func() (int64, float64), newChunk fu ) const nSamples = 120 // Same as tsdb.DefaultSamplesPerChunk. 
var exp []pair - for i := 0; i < nSamples; i++ { + for range nSamples { dt, dv := deltas() t += dt v += dv diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 564b312db5..ba2ebba730 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -568,7 +568,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa numPBuckets, numNBuckets := countSpans(h.PositiveSpans), countSpans(h.NegativeSpans) if numPBuckets > 0 { a.pBuckets = make([]xorValue, numPBuckets) - for i := 0; i < numPBuckets; i++ { + for i := range numPBuckets { a.pBuckets[i] = xorValue{ value: h.PositiveBuckets[i], leading: 0xff, @@ -579,7 +579,7 @@ func (a *FloatHistogramAppender) appendFloatHistogram(t int64, h *histogram.Floa } if numNBuckets > 0 { a.nBuckets = make([]xorValue, numNBuckets) - for i := 0; i < numNBuckets; i++ { + for i := range numNBuckets { a.nBuckets[i] = xorValue{ value: h.NegativeBuckets[i], leading: 0xff, diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go index 4233f0bae1..5c708faf5f 100644 --- a/tsdb/chunkenc/histogram_test.go +++ b/tsdb/chunkenc/histogram_test.go @@ -1736,10 +1736,10 @@ func BenchmarkAppendable(b *testing.B) { ZeroThreshold: 0.001, ZeroCount: 5, } - for i := 0; i < numSpans; i++ { + for range numSpans { h.PositiveSpans = append(h.PositiveSpans, histogram.Span{Offset: 5, Length: spanLength}) h.NegativeSpans = append(h.NegativeSpans, histogram.Span{Offset: 5, Length: spanLength}) - for j := 0; j < spanLength; j++ { + for j := range spanLength { h.PositiveBuckets = append(h.PositiveBuckets, int64(j)) h.NegativeBuckets = append(h.NegativeBuckets, int64(j)) } diff --git a/tsdb/chunkenc/varbit.go b/tsdb/chunkenc/varbit.go index 574edec48b..00ba027dda 100644 --- a/tsdb/chunkenc/varbit.go +++ b/tsdb/chunkenc/varbit.go @@ -64,7 +64,7 @@ func putVarbitInt(b *bstream, val int64) { // readVarbitInt reads an int64 encoded with putVarbitInt. func readVarbitInt(b *bstreamReader) (int64, error) { var d byte - for i := 0; i < 8; i++ { + for range 8 { d <<= 1 bit, err := b.readBitFast() if err != nil { @@ -169,7 +169,7 @@ func putVarbitUint(b *bstream, val uint64) { // readVarbitUint reads a uint64 encoded with putVarbitUint. 
func readVarbitUint(b *bstreamReader) (uint64, error) { var d byte - for i := 0; i < 8; i++ { + for range 8 { d <<= 1 bit, err := b.readBitFast() if err != nil { diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 5a37ebfea9..7d619b3300 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -330,7 +330,7 @@ func (it *xorIterator) Next() ValueType { var d byte // read delta-of-delta - for i := 0; i < 4; i++ { + for range 4 { d <<= 1 bit, err := it.br.readBitFast() if err != nil { diff --git a/tsdb/chunks/chunk_write_queue_test.go b/tsdb/chunks/chunk_write_queue_test.go index ea752ce4ba..b37021025d 100644 --- a/tsdb/chunks/chunk_write_queue_test.go +++ b/tsdb/chunks/chunk_write_queue_test.go @@ -249,7 +249,7 @@ func BenchmarkChunkWriteQueue_addJob(b *testing.B) { done := sync.WaitGroup{} done.Add(concurrentWrites) - for w := 0; w < concurrentWrites; w++ { + for range concurrentWrites { go func() { start.Wait() for j := range jobs { diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index 89c508aa8f..41fce69c72 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -1109,7 +1109,7 @@ type chunkBuffer struct { func newChunkBuffer() *chunkBuffer { cb := &chunkBuffer{} - for i := 0; i < inBufferShards; i++ { + for i := range inBufferShards { cb.inBufferChunks[i] = make(map[ChunkDiskMapperRef]chunkenc.Chunk) } return cb @@ -1133,7 +1133,7 @@ func (cb *chunkBuffer) get(ref ChunkDiskMapperRef) chunkenc.Chunk { } func (cb *chunkBuffer) clear() { - for i := 0; i < inBufferShards; i++ { + for i := range inBufferShards { cb.inBufferChunksMtxs[i].Lock() cb.inBufferChunks[i] = make(map[ChunkDiskMapperRef]chunkenc.Chunk) cb.inBufferChunksMtxs[i].Unlock() diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go index ce567bcde0..15e0d9acd7 100644 --- a/tsdb/chunks/head_chunks_test.go +++ b/tsdb/chunks/head_chunks_test.go @@ -69,7 +69,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) { var firstFileName string for hrw.curFileSequence < 3 || hrw.chkWriter.Buffered() == 0 { addChunks := func(numChunks int) { - for i := 0; i < numChunks; i++ { + for range numChunks { seriesRef, chkRef, mint, maxt, chunk, isOOO := createChunk(t, totalChunks, hrw) totalChunks++ expectedData = append(expectedData, expectedDataType{ @@ -558,7 +558,7 @@ func randomChunk(t *testing.T) chunkenc.Chunk { length := rand.Int() % 120 app, err := chunk.Appender() require.NoError(t, err) - for i := 0; i < length; i++ { + for range length { app.Append(rand.Int63(), rand.Float64()) } return chunk diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go index 3d9275eeef..ab4dd14838 100644 --- a/tsdb/chunks/queue_test.go +++ b/tsdb/chunks/queue_test.go @@ -76,7 +76,7 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) { lastWriteID := 0 lastReadID := 0 - for iter := 0; iter < maxIters; iter++ { + for range maxIters { if elements < maxCount { toWrite := r.Int() % (maxCount - elements) if toWrite == 0 { @@ -235,7 +235,7 @@ func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) { queue := newWriteJobQueue(count, count) - for i := 0; i < count; i++ { + for i := range count { require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(i)})) } @@ -246,7 +246,7 @@ func TestQueuePopAfterCloseReturnsAllElements(t *testing.T) { require.False(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(11111)})) // Verify that we can still read all pushed elements. 
- for i := 0; i < count; i++ { + for i := range count { j, b := queue.pop() require.True(t, b) require.Equal(t, HeadSeriesRef(i), j.seriesRef) @@ -268,7 +268,7 @@ func TestQueuePushPopManyGoroutines(t *testing.T) { refs := map[HeadSeriesRef]bool{} readersWG := sync.WaitGroup{} - for i := 0; i < readGoroutines; i++ { + for range readGoroutines { readersWG.Add(1) go func() { @@ -285,13 +285,13 @@ func TestQueuePushPopManyGoroutines(t *testing.T) { id := atomic.Uint64{} writersWG := sync.WaitGroup{} - for i := 0; i < writeGoroutines; i++ { + for range writeGoroutines { writersWG.Add(1) go func() { defer writersWG.Done() - for i := 0; i < writes; i++ { + for range writes { ref := id.Inc() require.True(t, queue.push(chunkWriteJob{seriesRef: HeadSeriesRef(ref)})) diff --git a/tsdb/compact.go b/tsdb/compact.go index 0641b75720..6c79059ad1 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -42,7 +42,7 @@ import ( func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 { ranges := make([]int64, 0, steps) curRange := minSize - for i := 0; i < steps; i++ { + for range steps { ranges = append(ranges, curRange) curRange *= int64(stepSize) } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index f2619e2497..8a4d19c179 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -1196,7 +1196,7 @@ func BenchmarkCompactionFromHead(b *testing.B) { require.NoError(b, err) for ln := 0; ln < labelNames; ln++ { app := h.Appender(context.Background()) - for lv := 0; lv < labelValues; lv++ { + for lv := range labelValues { app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0) } require.NoError(b, app.Commit()) @@ -1228,11 +1228,11 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) { require.NoError(b, err) for ln := 0; ln < labelNames; ln++ { app := h.Appender(context.Background()) - for lv := 0; lv < labelValues; lv++ { + for lv := range labelValues { lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)) _, err = app.Append(0, lbls, int64(totalSamples), 0) require.NoError(b, err) - for ts := 0; ts < totalSamples; ts++ { + for ts := range totalSamples { _, err = app.Append(0, lbls, int64(ts), float64(ts)) require.NoError(b, err) } @@ -1269,7 +1269,7 @@ func TestDisableAutoCompactions(t *testing.T) { // no new blocks were created when compaction is disabled. db.DisableCompactions() app := db.Appender(context.Background()) - for i := int64(0); i < 3; i++ { + for i := range int64(3) { _, err := app.Append(0, label, i*blockRange, 0) require.NoError(t, err) _, err = app.Append(0, label, i*blockRange+1000, 0) @@ -1282,7 +1282,7 @@ func TestDisableAutoCompactions(t *testing.T) { default: } - for x := 0; x < 10; x++ { + for range 10 { if prom_testutil.ToFloat64(db.metrics.compactionsSkipped) > 0.0 { break } @@ -1298,7 +1298,7 @@ func TestDisableAutoCompactions(t *testing.T) { case db.compactc <- struct{}{}: default: } - for x := 0; x < 100; x++ { + for range 100 { if len(db.Blocks()) > 0 { break } @@ -1683,7 +1683,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { ref storage.SeriesRef err error ) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { ts := int64(i) * timeStep ref, err = sparseApp.AppendHistogram(ref, ah.baseLabels, ts, ah.hists[i], nil) require.NoError(t, err) @@ -1708,7 +1708,7 @@ func TestSparseHistogramSpaceSavings(t *testing.T) { // Ingest histograms the old way. 
for _, ah := range allSparseSeries { refs := make([]storage.SeriesRef, c.numBuckets+((c.numSpans-1)*c.gapBetweenSpans)) - for i := 0; i < numHistograms; i++ { + for i := range numHistograms { ts := int64(i) * timeStep h := ah.hists[i] @@ -2117,7 +2117,6 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) { } for _, c := range cases { - c := c t.Run(c.name, func(t *testing.T) { t.Parallel() diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 87b3619705..5224b2800f 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -248,7 +248,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) { ctx := context.Background() { // Appending 121 samples because on the 121st a new chunk will be created. - for i := 0; i < 121; i++ { + for range 121 { app := db.Appender(ctx) _, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0) expSamples = append(expSamples, sample{t: maxt, f: 0}) @@ -436,7 +436,7 @@ func TestDeleteSimple(t *testing.T) { app := db.Appender(ctx) smpls := make([]float64, numSamples) - for i := int64(0); i < numSamples; i++ { + for i := range numSamples { smpls[i] = rand.Float64() app.Append(0, labels.FromStrings("a", "b"), i, smpls[i]) } @@ -645,7 +645,7 @@ func TestDB_Snapshot(t *testing.T) { ctx := context.Background() app := db.Appender(ctx) mint := int64(1414141414000) - for i := 0; i < 1000; i++ { + for i := range 1000 { _, err := app.Append(0, labels.FromStrings("foo", "bar"), mint+int64(i), 1.0) require.NoError(t, err) } @@ -691,7 +691,7 @@ func TestDB_Snapshot_ChunksOutsideOfCompactedRange(t *testing.T) { ctx := context.Background() app := db.Appender(ctx) mint := int64(1414141414000) - for i := 0; i < 1000; i++ { + for i := range 1000 { _, err := app.Append(0, labels.FromStrings("foo", "bar"), mint+int64(i), 1.0) require.NoError(t, err) } @@ -743,7 +743,7 @@ func TestDB_SnapshotWithDelete(t *testing.T) { app := db.Appender(ctx) smpls := make([]float64, numSamples) - for i := int64(0); i < numSamples; i++ { + for i := range numSamples { smpls[i] = rand.Float64() app.Append(0, labels.FromStrings("a", "b"), i, smpls[i]) } @@ -889,7 +889,7 @@ func TestDB_e2e(t *testing.T) { series := []chunks.Sample{} ts := rand.Int63n(300) - for i := 0; i < numDatapoints; i++ { + for range numDatapoints { v := rand.Float64() series = append(series, sample{ts, v, nil, nil}) @@ -940,7 +940,7 @@ func TestDB_e2e(t *testing.T) { sort.Sort(matched) - for i := 0; i < numRanges; i++ { + for range numRanges { mint := rand.Int63n(300) maxt := mint + rand.Int63n(timeInterval*int64(numDatapoints)) @@ -1065,7 +1065,7 @@ func TestWALSegmentSizeOptions(t *testing.T) { opts.WALSegmentSize = segmentSize db := openTestDB(t, opts, nil) - for i := int64(0); i < 155; i++ { + for i := range int64(155) { app := db.Appender(context.Background()) ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64()) require.NoError(t, err) @@ -1113,7 +1113,7 @@ func testWALReplayRaceOnSamplesLoggedBeforeSeries(t *testing.T, numSamplesBefore var enc record.Encoder var samples []record.RefSample - for ts := 0; ts < numSamplesBeforeSeriesCreation; ts++ { + for ts := range numSamplesBeforeSeriesCreation { samples = append(samples, record.RefSample{ Ref: chunks.HeadSeriesRef(uint64(seriesRef)), T: int64(ts), @@ -1180,7 +1180,7 @@ func TestTombstoneClean(t *testing.T) { app := db.Appender(ctx) smpls := make([]float64, numSamples) - for i := int64(0); i < numSamples; i++ { + for i := range numSamples { smpls[i] = rand.Float64() app.Append(0, labels.FromStrings("a", "b"), i, 
smpls[i]) } @@ -1275,7 +1275,7 @@ func TestTombstoneCleanResultEmptyBlock(t *testing.T) { app := db.Appender(ctx) smpls := make([]float64, numSamples) - for i := int64(0); i < numSamples; i++ { + for i := range numSamples { smpls[i] = rand.Float64() app.Append(0, labels.FromStrings("a", "b"), i, smpls[i]) } @@ -1322,7 +1322,7 @@ func TestTombstoneCleanFail(t *testing.T) { // Create some blocks pending for compaction. // totalBlocks should be >=2 so we have enough blocks to trigger compaction failure. totalBlocks := 2 - for i := 0; i < totalBlocks; i++ { + for i := range totalBlocks { blockDir := createBlock(t, db.Dir(), genSeries(1, 1, int64(i), int64(i)+1)) block, err := OpenBlock(nil, blockDir, nil, nil) require.NoError(t, err) @@ -1852,7 +1852,7 @@ func TestChunkAtBlockBoundary(t *testing.T) { blockRange := db.compactor.(*LeveledCompactor).ranges[0] label := labels.FromStrings("foo", "bar") - for i := int64(0); i < 3; i++ { + for i := range int64(3) { _, err := app.Append(0, label, i*blockRange, 0) require.NoError(t, err) _, err = app.Append(0, label, i*blockRange+1000, 0) @@ -1909,7 +1909,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) { blockRange := db.compactor.(*LeveledCompactor).ranges[0] label := labels.FromStrings("foo", "bar") - for i := int64(0); i < 5; i++ { + for i := range int64(5) { _, err := app.Append(0, label, i*blockRange, 0) require.NoError(t, err) _, err = app.Append(0, labels.FromStrings("blockID", strconv.FormatInt(i, 10)), i*blockRange, 0) @@ -2267,8 +2267,8 @@ func TestCorrectNumTombstones(t *testing.T) { ctx := context.Background() app := db.Appender(ctx) - for i := int64(0); i < 3; i++ { - for j := int64(0); j < 15; j++ { + for i := range int64(3) { + for j := range int64(15) { _, err := app.Append(0, defaultLabel, i*blockRange+j, 0) require.NoError(t, err) } @@ -2333,7 +2333,7 @@ func TestBlockRanges(t *testing.T) { require.NoError(t, err) require.NoError(t, app.Commit()) - for x := 0; x < 100; x++ { + for range 100 { if len(db.Blocks()) == 2 { break } @@ -2373,7 +2373,7 @@ func TestBlockRanges(t *testing.T) { _, err = app.Append(0, lbl, thirdBlockMaxt+rangeToTriggerCompaction, rand.Float64()) // Trigger a compaction require.NoError(t, err) require.NoError(t, app.Commit()) - for x := 0; x < 100; x++ { + for range 100 { if len(db.Blocks()) == 4 { break } @@ -2629,7 +2629,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) { }() // Append until the first mmapped head chunk. - for i := 0; i < 121; i++ { + for i := range 121 { app := db.Appender(context.Background()) _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0) require.NoError(t, err) @@ -2687,7 +2687,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) { for { app := db.Appender(ctx) - for j := 0; j < 100; j++ { + for j := range 100 { _, err := app.Append(0, labels.FromStrings("foo", "bar", "a", strconv.Itoa(j)), int64(iter), float64(iter)) require.NoError(t, err) } @@ -2712,7 +2712,7 @@ func TestDBCannotSeePartialCommits(t *testing.T) { // This is a race condition, so do a few tests to tickle it. // Usually most will fail. 
inconsistencies := 0 - for i := 0; i < 10; i++ { + for range 10 { func() { querier, err := db.Querier(0, 1000000) require.NoError(t, err) @@ -3039,7 +3039,7 @@ func TestChunkReader_ConcurrentReads(t *testing.T) { var wg sync.WaitGroup for _, chk := range chks { - for i := 0; i < 100; i++ { + for range 100 { wg.Add(1) go func(chunk chunks.Meta) { defer wg.Done() @@ -3082,7 +3082,7 @@ func TestCompactHead(t *testing.T) { app := db.Appender(ctx) var expSamples []sample maxt := 100 - for i := 0; i < maxt; i++ { + for i := range maxt { val := rand.Float64() _, err := app.Append(0, labels.FromStrings("a", "b"), int64(i), val) require.NoError(t, err) @@ -3306,7 +3306,7 @@ func TestOneCheckpointPerCompactCall(t *testing.T) { lbls := labels.FromStrings("foo_d", "choco_bar") // Append samples spanning 59 block ranges. app := db.Appender(context.Background()) - for i := int64(0); i < 60; i++ { + for i := range int64(60) { _, err := app.Append(0, lbls, blockRange*i, rand.Float64()) require.NoError(t, err) _, err = app.Append(0, lbls, (blockRange*i)+blockRange/2, rand.Float64()) @@ -3460,7 +3460,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t // Generate the metrics we're going to append. metrics := make([]labels.Labels, 0, numSeries) - for i := 0; i < numSeries; i++ { + for i := range numSeries { metrics = append(metrics, labels.FromStrings(labels.MetricName, fmt.Sprintf("test_%d", i))) } @@ -3534,7 +3534,7 @@ func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t // Stress the memory and call GC. This is required to increase the chances // the chunk memory area is released to the kernel. var buf []byte - for i := 0; i < numStressIterations; i++ { + for i := range numStressIterations { //nolint:staticcheck buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...) if i%1000 == 0 { @@ -3596,7 +3596,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun // Generate the metrics we're going to append. metrics := make([]labels.Labels, 0, numSeries) - for i := 0; i < numSeries; i++ { + for i := range numSeries { metrics = append(metrics, labels.FromStrings(labels.MetricName, fmt.Sprintf("test_%d", i))) } @@ -3668,7 +3668,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun // Stress the memory and call GC. This is required to increase the chances // the chunk memory area is released to the kernel. var buf []byte - for i := 0; i < numStressIterations; i++ { + for i := range numStressIterations { //nolint:staticcheck buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...) if i%1000 == 0 { @@ -3970,8 +3970,8 @@ func TestOOOWALWrite(t *testing.T) { scenarios := map[string]struct { appendSample func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) - expectedOOORecords []interface{} - expectedInORecords []interface{} + expectedOOORecords []any + expectedInORecords []any }{ "float": { appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) { @@ -3979,7 +3979,7 @@ func TestOOOWALWrite(t *testing.T) { require.NoError(t, err) return seriesRef, nil }, - expectedOOORecords: []interface{}{ + expectedOOORecords: []any{ // The MmapRef in this are not hand calculated, and instead taken from the test run. // What is important here is the order of records, and that MmapRef increases for each record. 
[]record.RefMmapMarker{ @@ -4031,7 +4031,7 @@ func TestOOOWALWrite(t *testing.T) { {Ref: 2, T: minutes(53), V: 53}, }, }, - expectedInORecords: []interface{}{ + expectedInORecords: []any{ []record.RefSeries{ {Ref: 1, Labels: s1}, {Ref: 2, Labels: s2}, @@ -4070,7 +4070,7 @@ func TestOOOWALWrite(t *testing.T) { require.NoError(t, err) return seriesRef, nil }, - expectedOOORecords: []interface{}{ + expectedOOORecords: []any{ // The MmapRef in this are not hand calculated, and instead taken from the test run. // What is important here is the order of records, and that MmapRef increases for each record. []record.RefMmapMarker{ @@ -4122,7 +4122,7 @@ func TestOOOWALWrite(t *testing.T) { {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestHistogram(53)}, }, }, - expectedInORecords: []interface{}{ + expectedInORecords: []any{ []record.RefSeries{ {Ref: 1, Labels: s1}, {Ref: 2, Labels: s2}, @@ -4161,7 +4161,7 @@ func TestOOOWALWrite(t *testing.T) { require.NoError(t, err) return seriesRef, nil }, - expectedOOORecords: []interface{}{ + expectedOOORecords: []any{ // The MmapRef in this are not hand calculated, and instead taken from the test run. // What is important here is the order of records, and that MmapRef increases for each record. []record.RefMmapMarker{ @@ -4213,7 +4213,7 @@ func TestOOOWALWrite(t *testing.T) { {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestFloatHistogram(53)}, }, }, - expectedInORecords: []interface{}{ + expectedInORecords: []any{ []record.RefSeries{ {Ref: 1, Labels: s1}, {Ref: 2, Labels: s2}, @@ -4252,7 +4252,7 @@ func TestOOOWALWrite(t *testing.T) { require.NoError(t, err) return seriesRef, nil }, - expectedOOORecords: []interface{}{ + expectedOOORecords: []any{ // The MmapRef in this are not hand calculated, and instead taken from the test run. // What is important here is the order of records, and that MmapRef increases for each record. []record.RefMmapMarker{ @@ -4304,7 +4304,7 @@ func TestOOOWALWrite(t *testing.T) { {Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)}, }, }, - expectedInORecords: []interface{}{ + expectedInORecords: []any{ []record.RefSeries{ {Ref: 1, Labels: s1}, {Ref: 2, Labels: s2}, @@ -4343,7 +4343,7 @@ func TestOOOWALWrite(t *testing.T) { require.NoError(t, err) return seriesRef, nil }, - expectedOOORecords: []interface{}{ + expectedOOORecords: []any{ // The MmapRef in this are not hand calculated, and instead taken from the test run. // What is important here is the order of records, and that MmapRef increases for each record. 
[]record.RefMmapMarker{ @@ -4395,7 +4395,7 @@ func TestOOOWALWrite(t *testing.T) { {Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)}, }, }, - expectedInORecords: []interface{}{ + expectedInORecords: []any{ []record.RefSeries{ {Ref: 1, Labels: s1}, {Ref: 2, Labels: s2}, @@ -4438,8 +4438,8 @@ func TestOOOWALWrite(t *testing.T) { func testOOOWALWrite(t *testing.T, appendSample func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error), - expectedOOORecords []interface{}, - expectedInORecords []interface{}, + expectedOOORecords []any, + expectedInORecords []any, ) { dir := t.TempDir() @@ -4495,7 +4495,7 @@ func testOOOWALWrite(t *testing.T, appendSample(app, s2, 53) require.NoError(t, app.Commit()) - getRecords := func(walDir string) []interface{} { + getRecords := func(walDir string) []any { sr, err := wlog.NewSegmentsReader(walDir) require.NoError(t, err) r := wlog.NewReader(sr) @@ -4503,7 +4503,7 @@ func testOOOWALWrite(t *testing.T, require.NoError(t, sr.Close()) }() - var records []interface{} + var records []any dec := record.NewDecoder(nil) for r.Next() { rec := r.Record() @@ -4562,7 +4562,7 @@ func TestDBPanicOnMmappingHeadChunk(t *testing.T) { app := db.Appender(context.Background()) var ref storage.SeriesRef lbls := labels.FromStrings("__name__", "testing", "foo", "bar") - for i := 0; i < numSamples; i++ { + for i := range numSamples { ref, err = app.Append(ref, lbls, lastTs, float64(lastTs)) require.NoError(t, err) lastTs += itvl @@ -7600,7 +7600,7 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { // OOO compaction fails 5 times. originalCompactor := db.compactor db.compactor = &mockCompactorFailing{t: t} - for i := 0; i < 5; i++ { + for range 5 { require.Error(t, db.CompactOOOHead(ctx)) } require.Empty(t, db.Blocks()) @@ -9170,9 +9170,9 @@ func TestChunkQuerierReadWriteRace(t *testing.T) { writer := func() error { <-time.After(5 * time.Millisecond) // Initial pause while readers start. ts := 0 - for i := 0; i < 500; i++ { + for range 500 { app := db.Appender(context.Background()) - for j := 0; j < 10; j++ { + for range 10 { ts++ _, err := app.Append(0, lbls, int64(ts), float64(ts*100)) if err != nil { @@ -9434,7 +9434,7 @@ func TestGenerateCompactionDelay(t *testing.T) { // The offset is generated and changed while opening. 
assertDelay(db.opts.CompactionDelay, c.compactionDelayPercent) - for i := 0; i < 1000; i++ { + for range 1000 { assertDelay(db.generateCompactionDelay(), c.compactionDelayPercent) } } diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 8ea1acf1a9..cdbcd5cde6 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -308,7 +308,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) var buf [1024]byte - for i := int64(0); i < count; i++ { + for i := range count { idx := (startIndex + i) % int64(len(oldBuffer)) if oldBuffer[idx].ref != nil { ce.migrate(&oldBuffer[idx], buf[:]) diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index dbd34cc48c..7577d755e0 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -427,7 +427,7 @@ func BenchmarkAddExemplar(b *testing.B) { var l labels.Labels b.StartTimer() - for i := 0; i < n; i++ { + for i := range n { if i%100 == 0 { l = labels.FromStrings("service", strconv.Itoa(i)) } @@ -525,12 +525,12 @@ func TestCircularExemplarStorage_Concurrent_AddExemplar_Resize(t *testing.T) { defer wg.Done() <-started - for i := 0; i < 100; i++ { + for range 100 { require.NoError(t, es.AddExemplar(l, e)) } }() - for i := 0; i < 100; i++ { + for i := range 100 { es.Resize(int64(i + 1)) if i == 0 { close(started) diff --git a/tsdb/fileutil/direct_io_writer_test.go b/tsdb/fileutil/direct_io_writer_test.go index ea010e5e96..e60df1f3bc 100644 --- a/tsdb/fileutil/direct_io_writer_test.go +++ b/tsdb/fileutil/direct_io_writer_test.go @@ -144,7 +144,7 @@ func TestDirectIOWriter(t *testing.T) { fileName := path.Join(t.TempDir(), "test") data := make([]byte, tc.dataSize) - for i := 0; i < len(data); i++ { + for i := range data { // Do not use 256 as it may be a divider of requiredAlignment. To avoid patterns. data[i] = byte(i % 251) } diff --git a/tsdb/head.go b/tsdb/head.go index cd0f771f96..7f66c06630 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -268,7 +268,7 @@ func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *H logger: l, opts: opts, memChunkPool: sync.Pool{ - New: func() interface{} { + New: func() any { return &memChunk{} }, }, diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 26b95880d3..8485d65435 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -560,7 +560,7 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState * // Iterate over the appendIDs, find the first one that the isolation state says not // to return. it := s.txs.iterator() - for index := 0; index < appendIDsToConsider; index++ { + for index := range appendIDsToConsider { appendID := it.At() if appendID <= isoState.maxAppendID { // Easy check first. 
if _, ok := isoState.incompleteAppends[appendID]; !ok { diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go index ae506c1d8e..b9f1700706 100644 --- a/tsdb/head_read_test.go +++ b/tsdb/head_read_test.go @@ -368,7 +368,7 @@ func TestMemSeries_chunk(t *testing.T) { } memChunkPool := &sync.Pool{ - New: func() interface{} { + New: func() any { return &memChunk{} }, } diff --git a/tsdb/head_test.go b/tsdb/head_test.go index e141c1dcfd..8fe14c35f9 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -123,7 +123,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { app := h.Appender(context.Background()) for _, s := range series[:seriesCount] { var ref storage.SeriesRef - for sampleIndex := int64(0); sampleIndex < samplesPerAppend; sampleIndex++ { + for sampleIndex := range samplesPerAppend { ref, err = app.Append(ref, s.Labels(), ts+sampleIndex, float64(ts+sampleIndex)) if err != nil { return err @@ -149,7 +149,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) { } } -func populateTestWL(t testing.TB, w *wlog.WL, recs []interface{}, buf []byte) []byte { +func populateTestWL(t testing.TB, w *wlog.WL, recs []any, buf []byte) []byte { var enc record.Encoder for _, r := range recs { buf = buf[:0] @@ -178,7 +178,7 @@ func populateTestWL(t testing.TB, w *wlog.WL, recs []interface{}, buf []byte) [] return buf } -func readTestWAL(t testing.TB, dir string) (recs []interface{}) { +func readTestWAL(t testing.TB, dir string) (recs []any) { sr, err := wlog.NewSegmentsReader(dir) require.NoError(t, err) defer func() { @@ -343,7 +343,7 @@ func BenchmarkLoadWLs(b *testing.B) { writeSeries = newWriteSeries } - buf = populateTestWL(b, wal, []interface{}{writeSeries}, buf) + buf = populateTestWL(b, wal, []any{writeSeries}, buf) } // Write samples. @@ -369,7 +369,7 @@ func BenchmarkLoadWLs(b *testing.B) { V: float64(i) * 100, }) } - buf = populateTestWL(b, wal, []interface{}{refSamples}, buf) + buf = populateTestWL(b, wal, []any{refSamples}, buf) } } @@ -397,7 +397,7 @@ func BenchmarkLoadWLs(b *testing.B) { // Write exemplars. refExemplars := make([]record.RefExemplar, 0, c.seriesPerBatch) - for i := 0; i < exemplarsPerSeries; i++ { + for i := range exemplarsPerSeries { for j := 0; j < c.batches; j++ { refExemplars = refExemplars[:0] for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ { @@ -408,14 +408,14 @@ func BenchmarkLoadWLs(b *testing.B) { Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i)), }) } - buf = populateTestWL(b, wal, []interface{}{refExemplars}, buf) + buf = populateTestWL(b, wal, []any{refExemplars}, buf) } } // Write OOO samples and mmap markers. 
refMarkers := make([]record.RefMmapMarker, 0, oooSeriesPerBatch) refSamples = make([]record.RefSample, 0, oooSeriesPerBatch) - for i := 0; i < oooSamplesPerSeries; i++ { + for i := range oooSamplesPerSeries { shouldAddMarkers := c.oooCapMax != 0 && i != 0 && int64(i)%c.oooCapMax == 0 for j := 0; j < c.batches; j++ { @@ -437,10 +437,10 @@ func BenchmarkLoadWLs(b *testing.B) { }) } if shouldAddMarkers { - populateTestWL(b, wbl, []interface{}{refMarkers}, buf) + populateTestWL(b, wbl, []any{refMarkers}, buf) } - buf = populateTestWL(b, wal, []interface{}{refSamples}, buf) - buf = populateTestWL(b, wbl, []interface{}{refSamples}, buf) + buf = populateTestWL(b, wal, []any{refSamples}, buf) + buf = populateTestWL(b, wbl, []any{refSamples}, buf) } } @@ -528,7 +528,7 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { endTs := startTs + uint64(DefaultBlockDuration) labelSets := make([]labels.Labels, seriesCnt) - for i := 0; i < seriesCnt; i++ { + for i := range seriesCnt { labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i)) } @@ -561,7 +561,7 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { workerReadyWg.Add(writeConcurrency + readConcurrency) // Start the write workers. - for wid := 0; wid < writeConcurrency; wid++ { + for wid := range writeConcurrency { // Create copy of workerID to be used by worker routine. workerID := wid @@ -579,7 +579,7 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { } app := head.Appender(ctx) - for i := 0; i < len(workerLabelSets); i++ { + for i := range workerLabelSets { // We also use the timestamp as the sample value. _, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts)) if err != nil { @@ -605,7 +605,7 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { readerTsCh := make(chan uint64) // Start the read workers. - for wid := 0; wid < readConcurrency; wid++ { + for wid := range readConcurrency { // Create copy of threadID to be used by worker routine. 
workerID := wid @@ -706,7 +706,7 @@ func TestHead_HighConcurrencyReadAndWrite(t *testing.T) { func TestHead_ReadWAL(t *testing.T) { for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} { t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { - entries := []interface{}{ + entries := []any{ []record.RefSeries{ {Ref: 10, Labels: labels.FromStrings("a", "1")}, {Ref: 11, Labels: labels.FromStrings("a", "2")}, @@ -891,14 +891,14 @@ func TestHead_WALMultiRef(t *testing.T) { func TestHead_WALCheckpointMultiRef(t *testing.T) { cases := []struct { name string - walEntries []interface{} + walEntries []any expectedWalExpiry int64 walTruncateMinT int64 - expectedWalEntries []interface{} + expectedWalEntries []any }{ { name: "Samples only; keep needed duplicate series record", - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -911,7 +911,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 500, walTruncateMinT: 500, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -923,7 +923,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, { name: "Tombstones only; keep needed duplicate series record", - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -936,7 +936,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 500, walTruncateMinT: 500, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -948,7 +948,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, { name: "Exemplars only; keep needed duplicate series record", - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -961,7 +961,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 500, walTruncateMinT: 500, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -973,7 +973,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, { name: "Histograms only; keep needed duplicate series record", - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -986,7 +986,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 500, walTruncateMinT: 500, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -998,7 +998,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, { name: "Float histograms only; keep needed duplicate series record", - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -1011,7 +1011,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 500, walTruncateMinT: 
500, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -1024,7 +1024,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { { name: "All record types; keep needed duplicate series record until last record", // Series with 2 refs and samples for both - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -1047,7 +1047,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 800, walTruncateMinT: 700, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -1060,7 +1060,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { { name: "All record types; drop expired duplicate series record", // Series with 2 refs and samples for both - walEntries: []interface{}{ + walEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, {Ref: 2, Labels: labels.FromStrings("a", "1")}, @@ -1084,7 +1084,7 @@ func TestHead_WALCheckpointMultiRef(t *testing.T) { }, expectedWalExpiry: 800, walTruncateMinT: 900, - expectedWalEntries: []interface{}{ + expectedWalEntries: []any{ []record.RefSeries{ {Ref: 1, Labels: labels.FromStrings("a", "1")}, }, @@ -1232,7 +1232,7 @@ func TestHead_RaceBetweenSeriesCreationAndGC(t *testing.T) { const totalSeries = 100_000 series := make([]labels.Labels, totalSeries) - for i := 0; i < totalSeries; i++ { + for i := range totalSeries { series[i] = labels.FromStrings("foo", strconv.Itoa(i)) } done := atomic.NewBool(false) @@ -1245,7 +1245,7 @@ func TestHead_RaceBetweenSeriesCreationAndGC(t *testing.T) { t.Errorf("Failed to commit: %v", err) } }() - for i := 0; i < totalSeries; i++ { + for i := range totalSeries { _, err := app.Append(0, series[i], 100, 1) if err != nil { t.Errorf("Failed to append: %v", err) @@ -1343,7 +1343,7 @@ func BenchmarkHead_Truncate(b *testing.B) { allSeries := [total]labels.Labels{} nameValues := make([]string, 0, 100) - for i := 0; i < total; i++ { + for i := range int(total) { nameValues = nameValues[:0] // A thousand labels like lbl_x_of_1000, each with total/1000 values @@ -1491,7 +1491,7 @@ func TestMemSeries_truncateChunks(t *testing.T) { } memChunkPool := sync.Pool{ - New: func() interface{} { + New: func() any { return &memChunk{} }, } @@ -1699,7 +1699,7 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) { func TestHeadDeleteSeriesWithoutSamples(t *testing.T) { for _, compress := range []compression.Type{compression.None, compression.Snappy, compression.Zstd} { t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) { - entries := []interface{}{ + entries := []any{ []record.RefSeries{ {Ref: 10, Labels: labels.FromStrings("a", "1")}, }, @@ -1867,7 +1867,7 @@ func TestDeleteUntilCurMax(t *testing.T) { numSamples := int64(10) app := hb.Appender(context.Background()) smpls := make([]float64, numSamples) - for i := int64(0); i < numSamples; i++ { + for i := range numSamples { smpls[i] = rand.Float64() _, err := app.Append(0, labels.FromStrings("a", "b"), i, smpls[i]) require.NoError(t, err) @@ -1914,7 +1914,7 @@ func TestDeletedSamplesAndSeriesStillInWALAfterCheckpoint(t *testing.T) { // Enough samples to cause a checkpoint. 
hb, w := newTestHead(t, int64(numSamples)*10, compression.None, false) - for i := 0; i < numSamples; i++ { + for i := range numSamples { app := hb.Appender(context.Background()) _, err := app.Append(0, labels.FromStrings("a", "b"), int64(i), 0) require.NoError(t, err) @@ -2014,7 +2014,7 @@ func TestDelete_e2e(t *testing.T) { ls := labels.New(l...) series := []chunks.Sample{} ts := rand.Int63n(300) - for i := 0; i < numDatapoints; i++ { + for range numDatapoints { v := rand.Float64() _, err := app.Append(0, ls, ts, v) require.NoError(t, err) @@ -2063,7 +2063,7 @@ func TestDelete_e2e(t *testing.T) { } } sort.Sort(matched) - for i := 0; i < numRanges; i++ { + for range numRanges { q, err := NewBlockQuerier(hb, 0, 100000) require.NoError(t, err) ss := q.Select(context.Background(), true, nil, del.ms...) @@ -2341,7 +2341,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { var nextTs int64 var totalAppendedSamples int - for i := 0; i < samplesPerChunk/4; i++ { + for i := range samplesPerChunk / 4 { ok, _ := s.append(nextTs, float64(i), 0, cOpts) require.Truef(t, ok, "slow sample %d was not appended", i) nextTs += slowRate @@ -2350,7 +2350,7 @@ func TestMemSeries_append_atVariableRate(t *testing.T) { require.Equal(t, DefaultBlockDuration, s.nextAt, "after appending a samplesPerChunk/4 samples at a slow rate, we should aim to cut a new block at the default block duration %d, but it's set to %d", DefaultBlockDuration, s.nextAt) // Suddenly, the rate increases and we receive a sample every millisecond. - for i := 0; i < math.MaxUint16; i++ { + for i := range math.MaxUint16 { ok, _ := s.append(nextTs, float64(i), 0, cOpts) require.Truef(t, ok, "quick sample %d was not appended", i) nextTs++ @@ -2584,7 +2584,7 @@ func TestHead_ReturnsSortedLabelValues(t *testing.T) { app := h.appender() for i := 100; i > 0; i-- { - for j := 0; j < 10; j++ { + for j := range 10 { lset := labels.FromStrings( "__name__", fmt.Sprintf("metric_%d", i), "label", fmt.Sprintf("value_%d", j), @@ -2800,7 +2800,7 @@ func TestHeadReadWriterRepair(t *testing.T) { s, created, _ := h.getOrCreate(1, labels.FromStrings("a", "1"), false) require.True(t, created, "series was not created") - for i := 0; i < 7; i++ { + for i := range 7 { ok, chunkCreated := s.append(int64(i*chunkRange), float64(i*chunkRange), 0, cOpts) require.True(t, ok, "series append failed") require.True(t, chunkCreated, "chunk was not created") @@ -3166,7 +3166,7 @@ func TestIsolationAppendIDZeroIsNoop(t *testing.T) { func TestHeadSeriesChunkRace(t *testing.T) { t.Parallel() - for i := 0; i < 1000; i++ { + for range 1000 { testHeadSeriesChunkRace(t) } } @@ -3402,7 +3402,7 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) { ctx := context.Background() app := head.Appender(context.Background()) - for i := 0; i < 100; i++ { + for i := range 100 { _, err := app.Append(0, labels.FromStrings( "tens", fmt.Sprintf("value%d", i/10), "unique", fmt.Sprintf("value%d", i), @@ -3412,7 +3412,7 @@ func TestHeadLabelValuesWithMatchers(t *testing.T) { require.NoError(t, app.Commit()) var uniqueWithout30s []string - for i := 0; i < 100; i++ { + for i := range 100 { if i/10 != 3 { uniqueWithout30s = append(uniqueWithout30s, fmt.Sprintf("value%d", i)) } @@ -3478,7 +3478,7 @@ func TestHeadLabelNamesWithMatchers(t *testing.T) { }() app := head.Appender(context.Background()) - for i := 0; i < 100; i++ { + for i := range 100 { _, err := app.Append(0, labels.FromStrings( "unique", fmt.Sprintf("value%d", i), ), 100, 0) @@ -3551,7 +3551,7 @@ func TestHeadShardedPostings(t 
*testing.T) { // Append some series. app := head.Appender(ctx) - for i := 0; i < 100; i++ { + for i := range 100 { _, err := app.Append(0, labels.FromStrings("unique", fmt.Sprintf("value%d", i), "const", "1"), 100, 0) require.NoError(t, err) } @@ -3576,7 +3576,7 @@ func TestHeadShardedPostings(t *testing.T) { actualShards := make(map[uint64][]storage.SeriesRef) actualPostings := make([]storage.SeriesRef, 0, len(expected)) - for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ { + for shardIndex := range shardCount { p, err = ir.Postings(ctx, "const", "1") require.NoError(t, err) @@ -3705,7 +3705,7 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) { app := head.Appender(context.Background()) metricCount := 1000000 - for i := 0; i < metricCount; i++ { + for i := range metricCount { _, err := app.Append(0, labels.FromStrings( "a_unique", fmt.Sprintf("value%d", i), "b_tens", fmt.Sprintf("value%d", i/(metricCount/10)), @@ -3744,13 +3744,13 @@ func TestIteratorSeekIntoBuffer(t *testing.T) { s := newMemSeries(labels.Labels{}, 1, 0, defaultIsolationDisabled, false) - for i := 0; i < 7; i++ { + for i := range 7 { ok, _ := s.append(int64(i), float64(i), 0, cOpts) require.True(t, ok, "sample append failed") } c, _, _, err := s.chunk(0, chunkDiskMapper, &sync.Pool{ - New: func() interface{} { + New: func() any { return &memChunk{} }, }) @@ -4348,7 +4348,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { require.NoError(t, app.Commit()) app = head.Appender(context.Background()) // Add some float. - for i := 0; i < 10; i++ { + for range 10 { ts++ _, err := app.Append(0, s2, ts, float64(ts)) require.NoError(t, err) @@ -4384,7 +4384,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { require.NoError(t, app.Commit()) app = head.Appender(context.Background()) // Add some float. - for i := 0; i < 10; i++ { + for range 10 { ts++ _, err := app.Append(0, s2, ts, float64(ts)) require.NoError(t, err) @@ -4835,7 +4835,7 @@ func TestHistogramMetrics(t *testing.T) { expHSeries, expHSamples := 0, 0 - for x := 0; x < 5; x++ { + for x := range 5 { expHSeries++ l := labels.FromStrings("a", fmt.Sprintf("b%d", x)) for i, h := range tsdbutil.GenerateTestHistograms(numHistograms) { @@ -5130,7 +5130,7 @@ func TestHistogramCounterResetHeader(t *testing.T) { checkExpCounterResetHeader(chunkenc.CounterReset) // Add 2 non-counter reset histogram chunks. Just to have some non-counter reset chunks in between. 
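// Worth noting for the shardIndex loop above: when the range operand is a
// typed integer (shardCount is a uint64 here), the declared loop variable
// takes that same type, so the old uint64(0) seeding disappears. A minimal
// sketch with assumed values:
//
//	var shardCount uint64 = 4
//	for shardIndex := range shardCount {
//		_ = shardIndex // shardIndex is uint64; iterates 0 through 3
//	}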
- for i := 0; i < 2000; i++ { + for range 2000 { appendHistogram(h) } checkExpCounterResetHeader(chunkenc.NotCounterReset, chunkenc.NotCounterReset) @@ -5760,7 +5760,7 @@ func TestHeadInit_DiscardChunksWithUnsupportedEncoding(t *testing.T) { seriesLabels := labels.FromStrings("a", "1") var seriesRef storage.SeriesRef var err error - for i := 0; i < 400; i++ { + for i := range 400 { seriesRef, err = app.Append(0, seriesLabels, int64(i), float64(i)) require.NoError(t, err) } @@ -5846,7 +5846,7 @@ func TestMmapPanicAfterMmapReplayCorruption(t *testing.T) { addChunks := func() { interval := DefaultBlockDuration / (4 * 120) app := h.Appender(context.Background()) - for i := 0; i < 250; i++ { + for i := range 250 { ref, err = app.Append(ref, lbls, lastTs, float64(lastTs)) lastTs += interval if i%10 == 0 { @@ -5909,7 +5909,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { addSamples := func(numSamples int) { app := h.Appender(context.Background()) var ref storage.SeriesRef - for i := 0; i < numSamples; i++ { + for i := range numSamples { ref, err = app.Append(ref, lbls, lastTs, float64(lastTs)) expSamples = append(expSamples, sample{t: lastTs, f: float64(lastTs)}) require.NoError(t, err) @@ -5923,7 +5923,7 @@ func TestReplayAfterMmapReplayError(t *testing.T) { } // Creating multiple m-map files. - for i := 0; i < 5; i++ { + for i := range 5 { addSamples(250) require.NoError(t, h.Close()) if i != 4 { @@ -6153,7 +6153,7 @@ func TestGaugeHistogramWALAndChunkHeader(t *testing.T) { checkHeaders() recs := readTestWAL(t, head.wal.Dir()) - require.Equal(t, []interface{}{ + require.Equal(t, []any{ []record.RefSeries{ { Ref: 1, @@ -6229,7 +6229,7 @@ func TestGaugeFloatHistogramWALAndChunkHeader(t *testing.T) { checkHeaders() recs := readTestWAL(t, head.wal.Dir()) - require.Equal(t, []interface{}{ + require.Equal(t, []any{ []record.RefSeries{ { Ref: 1, @@ -6267,7 +6267,7 @@ func TestSnapshotAheadOfWALError(t *testing.T) { require.NoError(t, app.Commit()) // Increment snapshot index to create sufficiently large difference. - for i := 0; i < 2; i++ { + for range 2 { _, err = head.wal.NextSegment() require.NoError(t, err) } @@ -6512,7 +6512,7 @@ func TestHeadDetectsDuplicateSampleAtSizeLimit(t *testing.T) { a := h.Appender(context.Background()) var err error vals := []float64{math.MaxFloat64, 0x00} // Use the worst case scenario for the XOR encoding. Otherwise we hit the sample limit before the size limit. - for i := 0; i < numSamples; i++ { + for i := range numSamples { ts := baseTS + int64(i/2)*10000 a.Append(0, labels.FromStrings("foo", "bar"), ts, vals[(i/2)%len(vals)]) err = a.Commit() @@ -7101,7 +7101,7 @@ func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(sto // memSeries.lastHistogram to be corrupt and fail the duplicate check. 
go func() { defer wg.Done() - for i := 0; i < 10000; i++ { + for i := range 10000 { app := head.Appender(context.Background()) require.NoError(t, appendFn(app, i)) require.NoError(t, app.Commit()) @@ -7110,7 +7110,7 @@ func testHeadAppendHistogramAndCommitConcurrency(t *testing.T, appendFn func(sto go func() { defer wg.Done() - for i := 0; i < 10000; i++ { + for i := range 10000 { app := head.Appender(context.Background()) require.NoError(t, appendFn(app, i)) require.NoError(t, app.Commit()) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 9b0982423f..858126b81c 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -99,7 +99,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch shards = make([][]record.RefSample, concurrency) histogramShards = make([][]histogramRecord, concurrency) - decoded = make(chan interface{}, 10) + decoded = make(chan any, 10) decodeErr, seriesCreationErr error ) @@ -107,7 +107,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // For CorruptionErr ensure to terminate all workers before exiting. _, ok := err.(*wlog.CorruptionErr) if ok || seriesCreationErr != nil { - for i := 0; i < concurrency; i++ { + for i := range concurrency { processors[i].closeAndDrain() } close(exemplarsInput) @@ -116,7 +116,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch }() wg.Add(concurrency) - for i := 0; i < concurrency; i++ { + for i := range concurrency { processors[i].setup() go func(wp *walSubsetProcessor) { @@ -281,7 +281,7 @@ Outer: // of unused memory. for len(samples) > 0 { m := min(len(samples), 5000) - for i := 0; i < concurrency; i++ { + for i := range concurrency { if shards[i] == nil { shards[i] = processors[i].reuseBuf() } @@ -298,7 +298,7 @@ Outer: mod := uint64(sam.Ref) % uint64(concurrency) shards[mod] = append(shards[mod], sam) } - for i := 0; i < concurrency; i++ { + for i := range concurrency { if len(shards[i]) > 0 { processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]} shards[i] = nil @@ -349,7 +349,7 @@ Outer: // of unused memory. for len(samples) > 0 { m := min(len(samples), 5000) - for i := 0; i < concurrency; i++ { + for i := range concurrency { if histogramShards[i] == nil { histogramShards[i] = processors[i].reuseHistogramBuf() } @@ -366,7 +366,7 @@ Outer: mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } - for i := 0; i < concurrency; i++ { + for i := range concurrency { if len(histogramShards[i]) > 0 { processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]} histogramShards[i] = nil @@ -384,7 +384,7 @@ Outer: // of unused memory. for len(samples) > 0 { m := min(len(samples), 5000) - for i := 0; i < concurrency; i++ { + for i := range concurrency { if histogramShards[i] == nil { histogramShards[i] = processors[i].reuseHistogramBuf() } @@ -401,7 +401,7 @@ Outer: mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } - for i := 0; i < concurrency; i++ { + for i := range concurrency { if len(histogramShards[i]) > 0 { processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]} histogramShards[i] = nil @@ -445,7 +445,7 @@ Outer: } // Signal termination to each worker and wait for it to close its output channel. 
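// The batching above leans on the min builtin (Go 1.21+): m can never exceed
// the remaining slice length, so samples[:m] stays in bounds. A hypothetical
// sketch of the same batching shape (process is a stand-in, not a function in
// this codebase):
//
//	for len(samples) > 0 {
//		m := min(len(samples), 5000) // batch size, capped at what's left
//		batch := samples[:m]
//		samples = samples[m:]
//		process(batch)
//	}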
- for i := 0; i < concurrency; i++ { + for i := range concurrency { processors[i].closeAndDrain() } close(exemplarsInput) @@ -735,7 +735,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch shards = make([][]record.RefSample, concurrency) histogramShards = make([][]histogramRecord, concurrency) - decodedCh = make(chan interface{}, 10) + decodedCh = make(chan any, 10) decodeErr error ) @@ -745,7 +745,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch _, ok := err.(*wlog.CorruptionErr) if ok { err = &errLoadWbl{err: err} - for i := 0; i < concurrency; i++ { + for i := range concurrency { processors[i].closeAndDrain() } wg.Wait() @@ -753,7 +753,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch }() wg.Add(concurrency) - for i := 0; i < concurrency; i++ { + for i := range concurrency { processors[i].setup() go func(wp *wblSubsetProcessor) { @@ -838,7 +838,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // of unused memory. for len(samples) > 0 { m := min(len(samples), 5000) - for i := 0; i < concurrency; i++ { + for i := range concurrency { if shards[i] == nil { shards[i] = processors[i].reuseBuf() } @@ -850,7 +850,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch mod := uint64(sam.Ref) % uint64(concurrency) shards[mod] = append(shards[mod], sam) } - for i := 0; i < concurrency; i++ { + for i := range concurrency { if len(shards[i]) > 0 { processors[i].input <- wblSubsetProcessorInputItem{samples: shards[i]} shards[i] = nil @@ -891,7 +891,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // of unused memory. for len(samples) > 0 { m := min(len(samples), 5000) - for i := 0; i < concurrency; i++ { + for i := range concurrency { if histogramShards[i] == nil { histogramShards[i] = processors[i].reuseHistogramBuf() } @@ -903,7 +903,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } - for i := 0; i < concurrency; i++ { + for i := range concurrency { if len(histogramShards[i]) > 0 { processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]} histogramShards[i] = nil @@ -920,7 +920,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // of unused memory. for len(samples) > 0 { m := min(len(samples), 5000) - for i := 0; i < concurrency; i++ { + for i := range concurrency { if histogramShards[i] == nil { histogramShards[i] = processors[i].reuseHistogramBuf() } @@ -932,7 +932,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } - for i := 0; i < concurrency; i++ { + for i := range concurrency { if len(histogramShards[i]) > 0 { processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]} histogramShards[i] = nil @@ -952,7 +952,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } // Signal termination to each worker and wait for it to close its output channel. 
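// Both loadWAL and loadWBL fan records out with the same trick visible above:
// a record goes to worker ref % concurrency, so every sample for a given
// series lands on the same worker and per-series ordering survives the
// fan-out. A simplified sketch (batch and workers are stand-in names):
//
//	shards := make([][]record.RefSample, concurrency)
//	for _, s := range batch {
//		w := uint64(s.Ref) % uint64(concurrency)
//		shards[w] = append(shards[w], s)
//	}
//	for i := range concurrency {
//		if len(shards[i]) > 0 {
//			workers[i] <- shards[i] // hand the shard off
//			shards[i] = nil         // the worker now owns that buffer
//		}
//	}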
- for i := 0; i < concurrency; i++ { + for i := range concurrency { processors[i].closeAndDrain() } wg.Wait() @@ -1163,7 +1163,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte { switch enc { case chunkenc.EncXOR: // Backwards compatibility for old sampleBuf which had last 4 samples. - for i := 0; i < 3; i++ { + for range 3 { buf.PutBE64int64(0) buf.PutBEFloat64(0) } @@ -1216,7 +1216,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh switch enc { case chunkenc.EncXOR: // Backwards-compatibility for old sampleBuf which had last 4 samples. - for i := 0; i < 3; i++ { + for range 3 { _ = dec.Be64int64() _ = dec.Be64Float64() } @@ -1329,7 +1329,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { ) // Add all series to the snapshot. stripeSize := h.series.size - for i := 0; i < stripeSize; i++ { + for i := range stripeSize { h.series.locks[i].RLock() for _, s := range h.series.series[i] { @@ -1467,7 +1467,7 @@ func LastChunkSnapshot(dir string) (string, int, int, error) { } maxIdx, maxOffset := -1, -1 maxFileName := "" - for i := 0; i < len(files); i++ { + for i := range files { fi := files[i] if !strings.HasPrefix(fi.Name(), chunkSnapshotPrefix) { @@ -1578,7 +1578,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie ) wg.Add(concurrency) - for i := 0; i < concurrency; i++ { + for i := range concurrency { go func(idx int, rc <-chan chunkSnapshotRecord) { defer wg.Done() defer func() { @@ -1678,9 +1678,7 @@ Outer: refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries) for _, shard := range shardedRefSeries { - for k, v := range shard { - refSeries[k] = v - } + maps.Copy(refSeries, shard) } } @@ -1747,9 +1745,7 @@ Outer: // We had no exemplar record, so we have to build the map here. refSeries = make(map[chunks.HeadSeriesRef]*memSeries, numSeries) for _, shard := range shardedRefSeries { - for k, v := range shard { - refSeries[k] = v - } + maps.Copy(refSeries, shard) } } diff --git a/tsdb/index/index.go b/tsdb/index/index.go index cd598fed65..28eacd7c00 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -770,7 +770,7 @@ func (w *Writer) writePostingsToTmpFiles() error { // See if label names we want are in the series. 
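// maps.Copy (Go 1.21, package "maps"), used above to merge the sharded series
// maps, copies every key/value pair from src into dst and overwrites keys
// that already exist, exactly like the removed for-range loop. A minimal
// sketch with hypothetical values:
//
//	dst := map[string]int{"a": 1}
//	src := map[string]int{"a": 2, "b": 3}
//	maps.Copy(dst, src) // dst is now map[a:2 b:3]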
numLabels := d.Uvarint() - for i := 0; i < numLabels; i++ { + for range numLabels { lno := uint32(d.Uvarint()) lvo := uint32(d.Uvarint()) @@ -1797,7 +1797,7 @@ func (*Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { k := d.Uvarint() offsets := make([]uint32, k) - for i := 0; i < k; i++ { + for i := range k { offsets[i] = uint32(d.Uvarint()) _ = d.Uvarint() // skip the label value @@ -1814,7 +1814,7 @@ func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) ( d := encoding.Decbuf{B: b} k := d.Uvarint() - for i := 0; i < k; i++ { + for range k { lno := uint32(d.Uvarint()) lvo := uint32(d.Uvarint()) diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index 848bc434e4..1ffa9bdb3d 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -210,7 +210,7 @@ func TestIndexRW_Postings(t *testing.T) { actualShards := make(map[uint64][]storage.SeriesRef) actualPostings := make([]storage.SeriesRef, 0, len(expected)) - for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ { + for shardIndex := range shardCount { p, err = ir.Postings(ctx, "a", "1") require.NoError(t, err) @@ -394,7 +394,7 @@ func TestPersistence_index_e2e(t *testing.T) { require.NoError(t, err) require.Len(t, res, len(v)) - for i := 0; i < len(v); i++ { + for i := range v { require.Equal(t, v[i], res[i]) } } @@ -466,7 +466,7 @@ func TestSymbols(t *testing.T) { symbolsStart := buf.Len() buf.PutBE32int(204) // Length of symbols table. buf.PutBE32int(100) // Number of symbols. - for i := 0; i < 100; i++ { + for i := range 100 { // i represents index in unicode characters table. buf.PutUvarintStr(string(rune(i))) // Symbol. } diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 55954e8a88..d5a17c3daa 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -47,7 +47,7 @@ const ensureOrderBatchSize = 1024 // ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder(). var ensureOrderBatchPool = sync.Pool{ - New: func() interface{} { + New: func() any { x := make([][]storage.SeriesRef, 0, ensureOrderBatchSize) return &x // Return pointer type as preferred by Pool. }, @@ -1023,14 +1023,14 @@ func (h postingsWithIndexHeap) Less(i, j int) bool { func (h *postingsWithIndexHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] } // Push implements heap.Interface. -func (h *postingsWithIndexHeap) Push(x interface{}) { +func (h *postingsWithIndexHeap) Push(x any) { *h = append(*h, x.(postingsWithIndex)) } // Pop implements heap.Interface and pops the last element, which is NOT the min element, // so this doesn't return the same heap.Pop() // Although this method is implemented for correctness, we don't expect it to be used, see popIndex() method for details. 
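// Since any is just a predeclared alias for interface{} (Go 1.18), the Push
// rewrite above and the Pop rewrite below keep satisfying container/heap;
// nothing changes at run time. A self-contained, hypothetical example of the
// same method set on a plain int heap:
//
//	type intHeap []int
//
//	func (h intHeap) Len() int           { return len(h) }
//	func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
//	func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
//	func (h *intHeap) Push(x any)        { *h = append(*h, x.(int)) }
//	func (h *intHeap) Pop() any {
//		old := *h
//		n := len(old)
//		x := old[n-1]
//		*h = old[:n-1]
//		return x
//	}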
-func (h *postingsWithIndexHeap) Pop() interface{} { old := *h n := len(old) x := old[n-1] diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index feaba90e52..7dffd3be9d 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -48,7 +48,7 @@ func TestMemPostings_ensureOrder(t *testing.T) { p := NewUnorderedMemPostings() p.m["a"] = map[string][]storage.SeriesRef{} - for i := 0; i < 100; i++ { + for i := range 100 { l := make([]storage.SeriesRef, 100) for j := range l { l[j] = storage.SeriesRef(rand.Uint64()) @@ -321,7 +321,7 @@ func BenchmarkIntersect(t *testing.B) { t.Run("LongPostings2", func(bench *testing.B) { var a, b, c, d []storage.SeriesRef - for i := 0; i < 12500000; i++ { + for i := range 12500000 { a = append(a, storage.SeriesRef(i)) } for i := 7500000; i < 12500000; i++ { @@ -353,7 +353,7 @@ var refs [][]storage.SeriesRef // Create 100000 matchers(k=100000), making sure all memory allocation is done before starting the loop. - for i := 0; i < 100000; i++ { + for range 100000 { var temp []storage.SeriesRef for j := storage.SeriesRef(1); j < 100; j++ { temp = append(temp, j) @@ -383,7 +383,7 @@ func BenchmarkMerge(t *testing.B) { var refs [][]storage.SeriesRef // Create 100000 matchers(k=100000), making sure all memory allocation is done before starting the loop. - for i := 0; i < 100000; i++ { + for i := range 100000 { var temp []storage.SeriesRef for j := 1; j < 100; j++ { temp = append(temp, storage.SeriesRef(i+j*100000)) @@ -643,7 +643,7 @@ func TestRemovedNextStackoverflow(t *testing.T) { var remove []storage.SeriesRef var i storage.SeriesRef - for i = 0; i < 1e7; i++ { + for i = range storage.SeriesRef(1e7) { full = append(full, i) remove = append(remove, i) } @@ -756,14 +756,14 @@ func TestBigEndian(t *testing.T) { } beLst := make([]byte, num*4) - for i := 0; i < num; i++ { + for i := range num { b := beLst[i*4 : i*4+4] binary.BigEndian.PutUint32(b, ls[i]) } t.Run("Iteration", func(t *testing.T) { bep := newBigEndianPostings(beLst) - for i := 0; i < num; i++ { + for i := range num { require.True(t, bep.Next()) require.Equal(t, storage.SeriesRef(ls[i]), bep.At()) } @@ -925,7 +925,7 @@ func BenchmarkPostings_Stats(b *testing.B) { } } createPostingsLabelValues("__name__", "metrics_name_can_be_very_big_and_bad", 1e3) - for i := 0; i < 20; i++ { + for i := range 20 { createPostingsLabelValues(fmt.Sprintf("host-%d", i), "metrics_name_can_be_very_big_and_bad", 1e3) createPostingsLabelValues(fmt.Sprintf("instance-%d", i), "10.0.IP.", 1e3) createPostingsLabelValues(fmt.Sprintf("job-%d", i), "Small_Job_name", 1e3) @@ -1029,7 +1029,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { const total = 1e6 allSeries := [total]labels.Labels{} nameValues := make([]string, 0, 100) - for i := 0; i < total; i++ { + for i := range int(total) { nameValues = nameValues[:0] // A thousand labels like lbl_x_of_1000, each with total/1000 values @@ -1062,7 +1062,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { stop := make(chan struct{}) wg := sync.WaitGroup{} - for i := 0; i < reads; i++ { + for i := range reads { wg.Add(1) go func(i int) { lbl := "lbl_" + itoa(i) + "_of_100" @@ -1087,7 +1087,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) { for n := 0; n < b.N; n++ { deleted := make(map[storage.SeriesRef]struct{}, refs) affected := make(map[labels.Label]struct{}, refs) - for i := 0; i < refs; i++ { + for i := range refs { ref
:= storage.SeriesRef(n*refs + i) deleted[ref] = struct{}{} allSeries[ref].Range(func(l labels.Label) { @@ -1361,13 +1361,13 @@ func TestListPostings(t *testing.T) { for _, c := range []int{2, 8, 9, 10} { t.Run(fmt.Sprintf("count=%d", c), func(t *testing.T) { list := make([]storage.SeriesRef, c) - for i := 0; i < c; i++ { + for i := range c { list[i] = storage.SeriesRef(i * 10) } t.Run("all one by one", func(t *testing.T) { p := NewListPostings(list) - for i := 0; i < c; i++ { + for i := range c { require.True(t, p.Seek(storage.SeriesRef(i*10))) require.Equal(t, storage.SeriesRef(i*10), p.At()) } @@ -1391,7 +1391,7 @@ func TestListPostings(t *testing.T) { func BenchmarkListPostings(b *testing.B) { const maxCount = 1e6 input := make([]storage.SeriesRef, maxCount) - for i := 0; i < maxCount; i++ { + for i := range int(maxCount) { input[i] = storage.SeriesRef(i << 2) } @@ -1437,8 +1437,8 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) { for _, labelValueCount := range []int{1_000, 10_000, 100_000} { b.Run(fmt.Sprintf("labels=%d", labelValueCount), func(b *testing.B) { mp := NewMemPostings() - for i := 0; i < labelValueCount; i++ { - for j := 0; j < seriesPerLabel; j++ { + for i := range labelValueCount { + for j := range seriesPerLabel { mp.Add(storage.SeriesRef(i*seriesPerLabel+j), labels.FromStrings("__name__", strconv.Itoa(j), "label", strconv.Itoa(i))) } } diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 82f506bc80..3ce3573106 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -23,7 +23,7 @@ func TestPostingsStats(t *testing.T) { const maxCount = 3000000 const heapLength = 10 stats.init(heapLength) - for i := 0; i < maxCount; i++ { + for i := range maxCount { item := Stat{ Name: "Label-da", Count: uint64(i), @@ -34,7 +34,7 @@ func TestPostingsStats(t *testing.T) { data := stats.get() require.Len(t, data, 10) - for i := 0; i < heapLength; i++ { + for i := range heapLength { require.Equal(t, uint64(maxCount-i), data[i].Count) } } @@ -62,7 +62,7 @@ func BenchmarkPostingStatsMaxHep(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { stats.init(heapLength) - for i := 0; i < maxCount; i++ { + for i := range maxCount { item := Stat{ Name: "Label-da", Count: uint64(i), diff --git a/tsdb/isolation.go b/tsdb/isolation.go index 1035991e74..95d3cfa5eb 100644 --- a/tsdb/isolation.go +++ b/tsdb/isolation.go @@ -87,7 +87,7 @@ func newIsolation(disabled bool) *isolation { appendsOpenList: appender, readsOpen: isoState, disabled: disabled, - appendersPool: sync.Pool{New: func() interface{} { return &isolationAppender{} }}, + appendersPool: sync.Pool{New: func() any { return &isolationAppender{} }}, } } diff --git a/tsdb/isolation_test.go b/tsdb/isolation_test.go index 7caff57d88..b01a846ea6 100644 --- a/tsdb/isolation_test.go +++ b/tsdb/isolation_test.go @@ -87,7 +87,7 @@ func BenchmarkIsolation(b *testing.B) { wg := sync.WaitGroup{} start := make(chan struct{}) - for g := 0; g < goroutines; g++ { + for range goroutines { wg.Add(1) go func() { @@ -117,7 +117,7 @@ func BenchmarkIsolationWithState(b *testing.B) { wg := sync.WaitGroup{} start := make(chan struct{}) - for g := 0; g < goroutines; g++ { + for range goroutines { wg.Add(1) go func() { diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index 2d5901a13b..8f773b6ef9 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -33,7 +33,7 @@ func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]= func 
makeEvenSampleSlice(n int, sampleFunc func(ts int64) sample) []sample { s := make([]sample, n) - for i := 0; i < n; i++ { + for i := range n { s[i] = sampleFunc(valEven(i)) } return s diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 56b5a050f1..25573bd312 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -46,9 +46,9 @@ func BenchmarkQuerier(b *testing.B) { app.Append(0, l, 0, 0) } - for n := 0; n < 10; n++ { + for n := range 10 { addSeries(labels.FromStrings("a", strconv.Itoa(n)+postingsBenchSuffix)) - for i := 0; i < 100000; i++ { + for i := range 100000 { addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "foo")) // Have some series that won't be matched, to properly test inverted matches. addSeries(labels.FromStrings("i", strconv.Itoa(i)+postingsBenchSuffix, "n", strconv.Itoa(n)+postingsBenchSuffix, "j", "bar")) @@ -246,13 +246,13 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) { func BenchmarkMergedStringIter(b *testing.B) { numSymbols := 100000 s := make([]string, numSymbols) - for i := 0; i < numSymbols; i++ { + for i := range numSymbols { s[i] = fmt.Sprintf("symbol%v", i) } for i := 0; i < b.N; i++ { it := NewMergedStringIter(index.NewStringListIter(s), index.NewStringListIter(s)) - for j := 0; j < 100; j++ { + for range 100 { it = NewMergedStringIter(it, index.NewStringListIter(s)) } @@ -278,7 +278,7 @@ func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(ap h := db.Head() app := h.Appender(context.Background()) - for i := 0; i < numSeries; i++ { + for i := range numSeries { addSeries(app, i) if i%1000 == 999 { // Commit every so often, so the appender doesn't get too big. require.NoError(b, app.Commit()) diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index 3dae3a9a47..eb980d3450 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -2092,7 +2092,7 @@ func TestDeletedIterator(t *testing.T) { require.NoError(t, err) // Insert random stuff from (0, 1000). act := make([]sample, 1000) - for i := 0; i < 1000; i++ { + for i := range 1000 { act[i].t = int64(i) act[i].f = rand.Float64() app.Append(act[i].t, act[i].f) @@ -2152,7 +2152,7 @@ func TestDeletedIterator_WithSeek(t *testing.T) { require.NoError(t, err) // Insert random stuff from (0, 1000). act := make([]sample, 1000) - for i := 0; i < 1000; i++ { + for i := range 1000 { act[i].t = int64(i) act[i].f = float64(i) app.Append(act[i].t, act[i].f) @@ -3092,7 +3092,6 @@ func TestQuerierIndexQueriesRace(t *testing.T) { } for _, c := range testCases { - c := c t.Run(fmt.Sprintf("%v", c.matchers), func(t *testing.T) { t.Parallel() db := openTestDB(t, DefaultOptions(), nil) @@ -3107,7 +3106,7 @@ func TestQuerierIndexQueriesRace(t *testing.T) { t.Cleanup(wg.Wait) t.Cleanup(cancel) - for i := 0; i < testRepeats; i++ { + for range testRepeats { q, err := db.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) @@ -3506,8 +3505,8 @@ func BenchmarkHeadChunkQuerier(b *testing.B) { // 3h of data. numTimeseries := 100 app := db.Appender(context.Background()) - for i := 0; i < 120*6; i++ { - for j := 0; j < numTimeseries; j++ { + for i := range 120 * 6 { + for j := range numTimeseries { lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j)) if i%10 == 0 { require.NoError(b, app.Commit()) @@ -3551,8 +3550,8 @@ func BenchmarkHeadQuerier(b *testing.B) { // 3h of data. 
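// The "c := c" and "tc := tc" deletions in these tests are safe because of
// the other Go 1.22 change: each loop iteration now gets a fresh variable,
// so closures launched by goroutines or parallel subtests no longer share
// one. A hypothetical sketch of the pattern (testCases, name, and use are
// stand-in names):
//
//	for _, tc := range testCases {
//		// Before Go 1.22 this needed: tc := tc
//		t.Run(tc.name, func(t *testing.T) {
//			t.Parallel()
//			use(tc) // captures this iteration's tc, not a shared one
//		})
//	}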
numTimeseries := 100 app := db.Appender(context.Background()) - for i := 0; i < 120*6; i++ { - for j := 0; j < numTimeseries; j++ { + for i := range 120 * 6 { + for j := range numTimeseries { lbls := labels.FromStrings("foo", fmt.Sprintf("bar%d", j)) if i%10 == 0 { require.NoError(b, app.Commit()) @@ -3622,7 +3621,7 @@ func TestQueryWithDeletedHistograms(t *testing.T) { ) lbs := labels.FromStrings("__name__", "test", "type", name) - for i := 0; i < 100; i++ { + for i := range 100 { h, fh := tc(i) seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, fh) require.NoError(t, err) @@ -3682,7 +3681,7 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) { lbs := labels.FromStrings("__name__", "test") // Create an int histogram chunk with samples between 0 - 20 and 30 - 40. - for i := 0; i < 20; i++ { + for i := range 20 { h := tsdbutil.GenerateTestHistogram(1) seriesRef, err = appender.AppendHistogram(seriesRef, lbs, int64(i), h, nil) require.NoError(t, err) diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 76f44c0cd7..bcddad1b52 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -262,7 +262,7 @@ func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, err // We can skip the rest of the fields (if we encounter any), but we must decode them anyway // so we can correctly align with the start with the next metadata record. var unit, help string - for i := 0; i < numFields; i++ { + for range numFields { fieldName := dec.UvarintStr() fieldValue := dec.UvarintStr() switch fieldName { @@ -293,7 +293,7 @@ func (*Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, err func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels { d.builder.Reset() nLabels := dec.Uvarint() - for i := 0; i < nLabels; i++ { + for range nLabels { lName := dec.UvarintBytes() lValue := dec.UvarintBytes() d.builder.UnsafeAddBytes(lName, lValue) diff --git a/tsdb/tombstones/tombstones_test.go b/tsdb/tombstones/tombstones_test.go index 9b5093b261..de036e22d0 100644 --- a/tsdb/tombstones/tombstones_test.go +++ b/tsdb/tombstones/tombstones_test.go @@ -38,12 +38,12 @@ func TestWriteAndReadbackTombstones(t *testing.T) { stones := NewMemTombstones() // Generate the tombstones. 
- for i := 0; i < 100; i++ { + for range 100 { ref += uint64(rand.Int31n(10)) + 1 numRanges := rand.Intn(5) + 1 dranges := make(Intervals, 0, numRanges) mint := rand.Int63n(time.Now().UnixNano()) - for j := 0; j < numRanges; j++ { + for range numRanges { dranges = dranges.Add(Interval{mint, mint + rand.Int63n(1000)}) mint += rand.Int63n(1000) + 1 } @@ -263,13 +263,13 @@ func TestMemTombstonesConcurrency(t *testing.T) { wg.Add(2) go func() { - for x := 0; x < totalRuns; x++ { + for x := range totalRuns { tomb.AddInterval(storage.SeriesRef(x), Interval{int64(x), int64(x)}) } wg.Done() }() go func() { - for x := 0; x < totalRuns; x++ { + for x := range totalRuns { _, err := tomb.Get(storage.SeriesRef(x)) require.NoError(t, err) } diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index a923519ef7..64311a8c3b 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -20,7 +20,7 @@ import ( ) func GenerateTestHistograms(n int) (r []*histogram.Histogram) { - for i := 0; i < n; i++ { + for i := range n { h := GenerateTestHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset @@ -58,7 +58,7 @@ func GenerateTestHistogram(i int64) *histogram.Histogram { } func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) { - for i := 0; i < n; i++ { + for i := range n { h := GenerateTestCustomBucketsHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset @@ -83,7 +83,7 @@ func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram { } func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { - for x := 0; x < n; x++ { + for x := range n { i := int64(math.Sin(float64(x))*100) + 100 r = append(r, GenerateTestGaugeHistogram(i)) } @@ -97,7 +97,7 @@ func GenerateTestGaugeHistogram(i int64) *histogram.Histogram { } func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { - for i := 0; i < n; i++ { + for i := range n { h := GenerateTestFloatHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset @@ -129,7 +129,7 @@ func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram { } func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) { - for i := 0; i < n; i++ { + for i := range n { h := GenerateTestCustomBucketsFloatHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset @@ -154,7 +154,7 @@ func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram } func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { - for x := 0; x < n; x++ { + for x := range n { i := int64(math.Sin(float64(x))*100) + 100 r = append(r, GenerateTestGaugeFloatHistogram(i)) } diff --git a/tsdb/wlog/checkpoint.go b/tsdb/wlog/checkpoint.go index 5c607d7030..64abe21aa9 100644 --- a/tsdb/wlog/checkpoint.go +++ b/tsdb/wlog/checkpoint.go @@ -410,7 +410,7 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { return nil, err } - for i := 0; i < len(files); i++ { + for i := range files { fi := files[i] if !strings.HasPrefix(fi.Name(), CheckpointPrefix) { continue diff --git a/tsdb/wlog/reader_test.go b/tsdb/wlog/reader_test.go index 7d63c00013..1ddc33e2c8 100644 --- a/tsdb/wlog/reader_test.go +++ b/tsdb/wlog/reader_test.go @@ -240,7 +240,7 @@ const fuzzLen = 500 func generateRandomEntries(w *WL, records chan []byte) error { var recs [][]byte - for i := 0; i < fuzzLen; i++ { + for i := range fuzzLen { var sz int64 switch i % 5 { case 0, 1: diff --git a/tsdb/wlog/watcher_test.go 
b/tsdb/wlog/watcher_test.go index 57a4bbe401..9e6ea65a7f 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -161,7 +161,7 @@ func TestTailSamples(t *testing.T) { }() // Write to the initial segment then checkpoint. - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { ref := i + 100 series := enc.Series([]record.RefSeries{ { @@ -171,7 +171,7 @@ func TestTailSamples(t *testing.T) { }, nil) require.NoError(t, w.Log(series)) - for j := 0; j < samplesCount; j++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { @@ -183,7 +183,7 @@ func TestTailSamples(t *testing.T) { require.NoError(t, w.Log(sample)) } - for j := 0; j < exemplarsCount; j++ { + for range exemplarsCount { inner := rand.Intn(ref + 1) exemplar := enc.Exemplars([]record.RefExemplar{ { @@ -196,7 +196,7 @@ func TestTailSamples(t *testing.T) { require.NoError(t, w.Log(exemplar)) } - for j := 0; j < histogramsCount; j++ { + for range histogramsCount { inner := rand.Intn(ref + 1) hist := &histogram.Histogram{ Schema: 2, @@ -308,7 +308,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { enc := record.Encoder{} - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { series := enc.Series([]record.RefSeries{ { Ref: chunks.HeadSeriesRef(i), @@ -316,7 +316,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { }, }, nil) recs = append(recs, series) - for j := 0; j < samplesCount; j++ { + for j := range samplesCount { sample := enc.Samples([]record.RefSample{ { Ref: chunks.HeadSeriesRef(j), @@ -375,7 +375,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { }() // Write to the initial segment then checkpoint. - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { ref := i + 100 series := enc.Series([]record.RefSeries{ { @@ -387,7 +387,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { // Add in an unknown record type, which should be ignored. require.NoError(t, w.Log([]byte{255})) - for j := 0; j < samplesCount; j++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { @@ -404,7 +404,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { w.Truncate(1) // Write more records after checkpointing. - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { series := enc.Series([]record.RefSeries{ { Ref: chunks.HeadSeriesRef(i), @@ -413,7 +413,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { }, nil) require.NoError(t, w.Log(series)) - for j := 0; j < samplesCount; j++ { + for j := range samplesCount { sample := enc.Samples([]record.RefSample{ { Ref: chunks.HeadSeriesRef(j), @@ -468,7 +468,7 @@ func TestReadCheckpoint(t *testing.T) { }) // Write to the initial segment then checkpoint. - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { ref := i + 100 series := enc.Series([]record.RefSeries{ { @@ -478,7 +478,7 @@ func TestReadCheckpoint(t *testing.T) { }, nil) require.NoError(t, w.Log(series)) - for j := 0; j < samplesCount; j++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { @@ -534,8 +534,8 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { require.NoError(t, err) // Write a bunch of data. 
- for i := 0; i < segments; i++ { - for j := 0; j < seriesCount; j++ { + for i := range segments { + for j := range seriesCount { ref := j + (i * 100) series := enc.Series([]record.RefSeries{ { @@ -545,7 +545,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { }, nil) require.NoError(t, w.Log(series)) - for k := 0; k < samplesCount; k++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { @@ -615,7 +615,7 @@ func TestCheckpointSeriesReset(t *testing.T) { }() // Write to the initial segment, then checkpoint later. - for i := 0; i < seriesCount; i++ { + for i := range seriesCount { ref := i + 100 series := enc.Series([]record.RefSeries{ { @@ -625,7 +625,7 @@ func TestCheckpointSeriesReset(t *testing.T) { }, nil) require.NoError(t, w.Log(series)) - for j := 0; j < samplesCount; j++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { @@ -696,8 +696,8 @@ func TestRun_StartupTime(t *testing.T) { w, err := NewSize(nil, nil, wdir, pageSize, compress) require.NoError(t, err) - for i := 0; i < segments; i++ { - for j := 0; j < seriesCount; j++ { + for i := range segments { + for j := range seriesCount { ref := j + (i * 100) series := enc.Series([]record.RefSeries{ { @@ -707,7 +707,7 @@ func TestRun_StartupTime(t *testing.T) { }, nil) require.NoError(t, w.Log(series)) - for k := 0; k < samplesCount; k++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { @@ -738,7 +738,7 @@ func TestRun_StartupTime(t *testing.T) { func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error { enc := record.Encoder{} - for j := 0; j < seriesCount; j++ { + for j := range seriesCount { ref := j + (segment * 100) series := enc.Series([]record.RefSeries{ { @@ -750,7 +750,7 @@ func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error { return err } - for k := 0; k < samplesCount; k++ { + for range samplesCount { inner := rand.Intn(ref + 1) sample := enc.Samples([]record.RefSample{ { diff --git a/tsdb/wlog/wlog_test.go b/tsdb/wlog/wlog_test.go index 758c4da4f4..7b556796f6 100644 --- a/tsdb/wlog/wlog_test.go +++ b/tsdb/wlog/wlog_test.go @@ -226,7 +226,7 @@ func TestCorruptAndCarryOn(t *testing.T) { w, err := NewSize(logger, nil, dir, segmentSize, compression.None) require.NoError(t, err) - for i := 0; i < 18; i++ { + for range 18 { buf := make([]byte, recordSize) _, err := rand.Read(buf) require.NoError(t, err) @@ -304,7 +304,7 @@ func TestCorruptAndCarryOn(t *testing.T) { require.Equal(t, 1, w.segment.Index()) // We corrupted segment 0. require.Equal(t, 0, w.donePages) - for i := 0; i < 5; i++ { + for range 5 { buf := make([]byte, recordSize) _, err := rand.Read(buf) require.NoError(t, err) @@ -356,7 +356,7 @@ func TestSegmentMetric(t *testing.T) { initialSegment := client_testutil.ToFloat64(w.metrics.currentSegment) // Write 3 records, each of which is half the segment size, meaning we should rotate to the next segment. 
- for i := 0; i < 3; i++ { + for range 3 { buf := make([]byte, recordSize) _, err := rand.Read(buf) require.NoError(t, err) @@ -383,7 +383,7 @@ func TestCompression(t *testing.T) { require.NoError(t, err) buf := make([]byte, recordSize) - for i := 0; i < records; i++ { + for range records { require.NoError(t, w.Log(buf)) } require.NoError(t, w.Close()) @@ -568,7 +568,7 @@ func BenchmarkWAL_Log(b *testing.B) { func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() - for i := 0; i < 2; i++ { + for range 2 { wl, err := New(promslog.NewNopLogger(), reg, t.TempDir(), compression.None) require.NoError(t, err) require.NoError(t, wl.Close()) diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index 5888a3256a..37f983dde7 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -16,6 +16,7 @@ package annotations import ( "errors" "fmt" + "maps" "github.com/prometheus/common/model" @@ -55,9 +56,7 @@ func (a *Annotations) Merge(aa Annotations) Annotations { } *a = Annotations{} } - for key, val := range aa { - (*a)[key] = val - } + maps.Copy((*a), aa) return *a } diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index 42d96580a8..14382663ee 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -54,12 +54,12 @@ func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error { } func header(title, help string) []byte { - return []byte(fmt.Sprintf(`--- + return fmt.Appendf(nil, `--- title: %s --- %s -`, title, help)) +`, title, help) } func createFlagRow(flag *kingpin.FlagModel) []string { diff --git a/util/fmtutil/format.go b/util/fmtutil/format.go index a10908bb8c..a49341a6e5 100644 --- a/util/fmtutil/format.go +++ b/util/fmtutil/format.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "maps" "sort" "time" @@ -116,9 +117,7 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me // Preserve metric name order with first quantile labels timeseries then sum suffix timeseries and finally count suffix timeseries // Add Summary quantile timeseries quantileLabels := make(map[string]string, len(labels)+1) - for key, value := range labels { - quantileLabels[key] = value - } + maps.Copy(quantileLabels, labels) for _, q := range m.GetSummary().Quantile { quantileLabels[model.QuantileLabel] = fmt.Sprint(q.GetQuantile()) @@ -137,9 +136,7 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me // Preserve metric name order with first bucket suffix timeseries then sum suffix timeseries and finally count suffix timeseries // Add Histogram bucket timeseries bucketLabels := make(map[string]string, len(labels)+1) - for key, value := range labels { - bucketLabels[key] = value - } + maps.Copy(bucketLabels, labels) for _, b := range m.GetHistogram().Bucket { bucketLabels[model.MetricNameLabel] = metricName + bucketStr bucketLabels[model.BucketLabel] = fmt.Sprint(b.GetUpperBound()) @@ -186,9 +183,7 @@ func makeLabelsMap(m *dto.Metric, metricName string, extraLabels map[string]stri labels[model.MetricNameLabel] = metricName // add extra labels - for key, value := range extraLabels { - labels[key] = value - } + maps.Copy(labels, extraLabels) // add metric labels for _, label := range m.Label { diff --git a/util/httputil/context.go b/util/httputil/context.go index fddcfba941..9b16428892 100644 --- a/util/httputil/context.go +++ b/util/httputil/context.go @@ -41,7 +41,7 @@ func ContextFromRequest(ctx context.Context, r 
*http.Request) context.Context { if v := ctx.Value(pathParam{}); v != nil { path = v.(string) } - return promql.NewOriginContext(ctx, map[string]interface{}{ + return promql.NewOriginContext(ctx, map[string]any{ "httpRequest": map[string]string{ "clientIP": ip, "method": r.Method, diff --git a/util/logging/dedupe_test.go b/util/logging/dedupe_test.go index a8774aefd3..fed34b8a73 100644 --- a/util/logging/dedupe_test.go +++ b/util/logging/dedupe_test.go @@ -31,7 +31,7 @@ func TestDedupe(t *testing.T) { defer d.Stop() // Log 10 times quickly, ensure they are deduped. - for i := 0; i < 10; i++ { + for range 10 { dlog.Info("test", "hello", "world") } @@ -65,14 +65,14 @@ func TestDedupeConcurrent(t *testing.T) { concurrentWriteFunc := func() { go func() { dlog1 := dlog.With("writer", 1) - for i := 0; i < 10; i++ { + for range 10 { dlog1.With("foo", "bar").Info("test", "hello", "world") } }() go func() { dlog2 := dlog.With("writer", 2) - for i := 0; i < 10; i++ { + for range 10 { dlog2.With("foo", "bar").Info("test", "hello", "world") } }() diff --git a/util/notifications/notifications_test.go b/util/notifications/notifications_test.go index e487e9ce54..3d9ba6bb12 100644 --- a/util/notifications/notifications_test.go +++ b/util/notifications/notifications_test.go @@ -153,7 +153,7 @@ func TestMultipleSubscribers(t *testing.T) { require.Len(t, receivedSub2, 2, "Expected 2 notifications for subscriber 2.") // Verify that both subscribers received the same notifications. - for i := 0; i < 2; i++ { + for i := range 2 { require.Equal(t, receivedSub1[i], receivedSub2[i], "Subscriber notification mismatch at index %d.", i) } } diff --git a/util/pool/pool.go b/util/pool/pool.go index 2ee8971854..7d5a8e3abf 100644 --- a/util/pool/pool.go +++ b/util/pool/pool.go @@ -24,12 +24,12 @@ type Pool struct { buckets []sync.Pool sizes []int // make is the function used to create an empty slice when none exist yet. - make func(int) interface{} + make func(int) any } // New returns a new Pool with size buckets for minSize to maxSize // increasing by the given factor. -func New(minSize, maxSize int, factor float64, makeFunc func(int) interface{}) *Pool { +func New(minSize, maxSize int, factor float64, makeFunc func(int) any) *Pool { if minSize < 1 { panic("invalid minimum pool size") } @@ -56,7 +56,7 @@ func New(minSize, maxSize int, factor float64, makeFunc func(int) interface{}) * } // Get returns a new byte slices that fits the given size. -func (p *Pool) Get(sz int) interface{} { +func (p *Pool) Get(sz int) any { for i, bktSize := range p.sizes { if sz > bktSize { continue @@ -71,7 +71,7 @@ func (p *Pool) Get(sz int) interface{} { } // Put adds a slice to the right bucket in the pool. -func (p *Pool) Put(s interface{}) { +func (p *Pool) Put(s any) { slice := reflect.ValueOf(s) if slice.Kind() != reflect.Slice { diff --git a/util/pool/pool_test.go b/util/pool/pool_test.go index c002798942..e1ac13fb90 100644 --- a/util/pool/pool_test.go +++ b/util/pool/pool_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/require" ) -func makeFunc(size int) interface{} { +func makeFunc(size int) any { return make([]int, 0, size) } diff --git a/util/stats/query_stats.go b/util/stats/query_stats.go index f0e9b90a62..d8ec186f4c 100644 --- a/util/stats/query_stats.go +++ b/util/stats/query_stats.go @@ -89,7 +89,7 @@ func (s stepStat) String() string { // MarshalJSON implements json.Marshaler. 
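// stepStat below serializes as the usual [timestamp, value] pair; the
// [...]any literal is a fixed-size array whose length is inferred, which is
// the idiomatic way to marshal two differently typed elements as one JSON
// array. A minimal sketch with made-up values (assumes encoding/json and fmt
// are imported):
//
//	pair := [...]any{1.5, "v"} // type [2]any
//	b, _ := json.Marshal(pair)
//	fmt.Println(string(b)) // prints [1.5,"v"]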
func (s stepStat) MarshalJSON() ([]byte, error) { - return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V}) + return json.Marshal([...]any{float64(s.T) / 1000, s.V}) } // queryTimings with all query timers mapped to durations. diff --git a/util/strutil/quote.go b/util/strutil/quote.go index 95dcb6f694..66d9c88ffc 100644 --- a/util/strutil/quote.go +++ b/util/strutil/quote.go @@ -201,7 +201,7 @@ func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err = ErrSyntax return } - for j := 0; j < 2; j++ { // One digit already; two more. + for j := range 2 { // One digit already; two more. x := rune(s[j]) - '0' if x < 0 || x > 7 { err = ErrSyntax diff --git a/util/testutil/cmp.go b/util/testutil/cmp.go index 24d39d514c..3ea1f40168 100644 --- a/util/testutil/cmp.go +++ b/util/testutil/cmp.go @@ -25,13 +25,13 @@ import ( // RequireEqual is a replacement for require.Equal using go-cmp adapted for // Prometheus data structures, instead of DeepEqual. -func RequireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) { +func RequireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) { t.Helper() RequireEqualWithOptions(t, expected, actual, nil, msgAndArgs...) } // RequireEqualWithOptions works like RequireEqual but allows extra cmp.Options. -func RequireEqualWithOptions(t testing.TB, expected, actual interface{}, extra []cmp.Option, msgAndArgs ...interface{}) { +func RequireEqualWithOptions(t testing.TB, expected, actual any, extra []cmp.Option, msgAndArgs ...any) { t.Helper() options := append([]cmp.Option{cmp.Comparer(labels.Equal)}, extra...) if cmp.Equal(expected, actual, options...) { diff --git a/util/testutil/context.go b/util/testutil/context.go index ca137ceeb6..3d2a09d637 100644 --- a/util/testutil/context.go +++ b/util/testutil/context.go @@ -42,7 +42,7 @@ func (c *MockContext) Err() error { } // Value ignores the Value and always returns nil. -func (*MockContext) Value(interface{}) interface{} { +func (*MockContext) Value(any) any { return nil } diff --git a/util/testutil/directory.go b/util/testutil/directory.go index 2f2af69cd3..d8c2f74bdb 100644 --- a/util/testutil/directory.go +++ b/util/testutil/directory.go @@ -72,7 +72,7 @@ type ( // the test flags, which we do not want in non-test binaries even if // they make use of these utilities for some reason). T interface { - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) FailNow() } ) diff --git a/util/treecache/treecache.go b/util/treecache/treecache.go index 4d4b6f544c..86fd207074 100644 --- a/util/treecache/treecache.go +++ b/util/treecache/treecache.go @@ -57,7 +57,7 @@ func NewZookeeperLogger(logger *slog.Logger) ZookeeperLogger { } // Printf implements zk.Logger. -func (zl ZookeeperLogger) Printf(s string, i ...interface{}) { +func (zl ZookeeperLogger) Printf(s string, i ...any) { zl.logger.Info(s, i...) 
} diff --git a/util/zeropool/pool.go b/util/zeropool/pool.go index 4f6deddfb1..946ce02091 100644 --- a/util/zeropool/pool.go +++ b/util/zeropool/pool.go @@ -39,7 +39,7 @@ type Pool[T any] struct { func New[T any](item func() T) Pool[T] { return Pool[T]{ items: sync.Pool{ - New: func() interface{} { + New: func() any { val := item() return &val }, diff --git a/util/zeropool/pool_test.go b/util/zeropool/pool_test.go index e9793f64d7..d5196ba957 100644 --- a/util/zeropool/pool_test.go +++ b/util/zeropool/pool_test.go @@ -51,7 +51,7 @@ func TestPool(t *testing.T) { var counter atomic.Int64 do := make(chan struct{}, 1e6) - for i := 0; i < iterations; i++ { + for range int(iterations) { do <- struct{}{} } close(do) @@ -59,7 +59,7 @@ func TestPool(t *testing.T) { run := make(chan struct{}) done := sync.WaitGroup{} done.Add(concurrency) - for i := 0; i < concurrency; i++ { + for i := range concurrency { go func(worker int) { <-run for range do { diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 69f5ce58dd..5bdf0f3b26 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -183,16 +183,16 @@ type RuntimeInfo struct { // Response contains a response to a HTTP API request. type Response struct { - Status status `json:"status"` - Data interface{} `json:"data,omitempty"` - ErrorType string `json:"errorType,omitempty"` - Error string `json:"error,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Infos []string `json:"infos,omitempty"` + Status status `json:"status"` + Data any `json:"data,omitempty"` + ErrorType string `json:"errorType,omitempty"` + Error string `json:"error,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Infos []string `json:"infos,omitempty"` } type apiFuncResult struct { - data interface{} + data any err *apiError warnings annotations.Annotations finalizer func() @@ -1468,7 +1468,7 @@ type RuleGroup struct { LastEvaluation time.Time `json:"lastEvaluation"` } -type Rule interface{} +type Rule any type AlertingRule struct { // State can be "pending", "firing", "inactive". @@ -2002,7 +2002,7 @@ func (api *API) cleanTombstones(*http.Request) apiFuncResult { // Query string is needed to get the position information for the annotations, and it // can be empty if the position information isn't needed. 
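// On the zeropool change above: sync.Pool's New field is typed func() any,
// and the constructor stores a *T rather than a T because a pointer fits in
// an interface value without allocating, while a plain value would be boxed
// on every round trip. A simplified, hypothetical wrapper in the same spirit
// (the real zeropool package has more machinery than shown here):
//
//	type ValuePool[T any] struct{ p sync.Pool }
//
//	func NewValuePool[T any](item func() T) ValuePool[T] {
//		return ValuePool[T]{p: sync.Pool{New: func() any {
//			v := item()
//			return &v // store *T, not T
//		}}}
//	}
//
//	func (p *ValuePool[T]) Get() *T  { return p.p.Get().(*T) }
//	func (p *ValuePool[T]) Put(v *T) { p.p.Put(v) }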
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index 69f5ce58dd..5bdf0f3b26 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -183,16 +183,16 @@ type RuntimeInfo struct {
 
 // Response contains a response to a HTTP API request.
 type Response struct {
-	Status    status      `json:"status"`
-	Data      interface{} `json:"data,omitempty"`
-	ErrorType string      `json:"errorType,omitempty"`
-	Error     string      `json:"error,omitempty"`
-	Warnings  []string    `json:"warnings,omitempty"`
-	Infos     []string    `json:"infos,omitempty"`
+	Status    status   `json:"status"`
+	Data      any      `json:"data,omitempty"`
+	ErrorType string   `json:"errorType,omitempty"`
+	Error     string   `json:"error,omitempty"`
+	Warnings  []string `json:"warnings,omitempty"`
+	Infos     []string `json:"infos,omitempty"`
 }
 
 type apiFuncResult struct {
-	data      interface{}
+	data      any
 	err       *apiError
 	warnings  annotations.Annotations
 	finalizer func()
@@ -1468,7 +1468,7 @@ type RuleGroup struct {
 	LastEvaluation time.Time `json:"lastEvaluation"`
 }
 
-type Rule interface{}
+type Rule any
 
 type AlertingRule struct {
 	// State can be "pending", "firing", "inactive".
@@ -2002,7 +2002,7 @@ func (api *API) cleanTombstones(*http.Request) apiFuncResult {
 
 // Query string is needed to get the position information for the annotations, and it
 // can be empty if the position information isn't needed.
-func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
+func (api *API) respond(w http.ResponseWriter, req *http.Request, data any, warnings annotations.Annotations, query string) {
 	statusMessage := statusSuccess
 
 	warn, info := warnings.AsStrings(query, 10, 10)
@@ -2050,7 +2050,7 @@ func (api *API) negotiateCodec(req *http.Request, resp *Response) (Codec, error)
 	return defaultCodec, nil
 }
 
-func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
+func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data any) {
 	json := jsoniter.ConfigCompatibleWithStandardLibrary
 	b, err := json.Marshal(&Response{
 		Status: statusError,
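For context on the `Data any` field in the hunk above: the envelope marshals whatever payload the handler produced, and `json:"...,omitempty"` drops fields that are nil or empty, which is why error responses carry no `data` key. An illustrative sketch with the field set trimmed to three fields, not the full Response type:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Response struct {
        Status   string   `json:"status"`
        Data     any      `json:"data,omitempty"`
        Warnings []string `json:"warnings,omitempty"`
    }

    func main() {
        ok := Response{Status: "success", Data: map[string]any{"resultType": "scalar"}}
        b, _ := json.Marshal(ok)
        fmt.Println(string(b)) // {"status":"success","data":{"resultType":"scalar"}}

        errResp := Response{Status: "error"} // nil Data and Warnings are omitted
        b, _ = json.Marshal(errResp)
        fmt.Println(string(b)) // {"status":"error"}
    }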
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 107ed8bab1..3c23d9f5c8 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -909,12 +909,12 @@ func TestStats(t *testing.T) {
 		name     string
 		renderer StatsRenderer
 		param    string
-		expected func(*testing.T, interface{})
+		expected func(*testing.T, any)
 	}{
 		{
 			name:  "stats is blank",
 			param: "",
-			expected: func(t *testing.T, i interface{}) {
+			expected: func(t *testing.T, i any) {
 				require.IsType(t, &QueryData{}, i)
 				qd := i.(*QueryData)
 				require.Nil(t, qd.Stats)
@@ -923,7 +923,7 @@ func TestStats(t *testing.T) {
 		{
 			name:  "stats is true",
 			param: "true",
-			expected: func(t *testing.T, i interface{}) {
+			expected: func(t *testing.T, i any) {
 				require.IsType(t, &QueryData{}, i)
 				qd := i.(*QueryData)
 				require.NotNil(t, qd.Stats)
@@ -938,7 +938,7 @@ func TestStats(t *testing.T) {
 		{
 			name:  "stats is all",
 			param: "all",
-			expected: func(t *testing.T, i interface{}) {
+			expected: func(t *testing.T, i any) {
 				require.IsType(t, &QueryData{}, i)
 				qd := i.(*QueryData)
 				require.NotNil(t, qd.Stats)
@@ -959,7 +959,7 @@ func TestStats(t *testing.T) {
 				return nil
 			},
 			param: "known",
-			expected: func(t *testing.T, i interface{}) {
+			expected: func(t *testing.T, i any) {
 				require.IsType(t, &QueryData{}, i)
 				qd := i.(*QueryData)
 				require.NotNil(t, qd.Stats)
@@ -1108,19 +1108,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 		endpoint              apiFunc
 		params                map[string]string
 		query                 url.Values
-		response              interface{}
+		response              any
 		responseLen           int // If nonzero, check only the length; `response` is ignored.
 		responseMetadataTotal int
 		responseAsJSON        string
 		warningsCount         int
 		errType               errorType
-		sorter                func(interface{})
+		sorter                func(any)
 		metadata              []targetMetadata
 		exemplars             []exemplar.QueryResult
-		zeroFunc              func(interface{})
+		zeroFunc              func(any)
 	}
 
-	rulesZeroFunc := func(i interface{}) {
+	rulesZeroFunc := func(i any) {
 		if i != nil {
 			v := i.(*RuleDiscovery)
 			for _, ruleGroup := range v.RuleGroups {
@@ -1991,7 +1991,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 					Unit: "",
 				},
 			},
-			sorter: func(m interface{}) {
+			sorter: func(m any) {
 				sort.Slice(m.([]metricMetadata), func(i, j int) bool {
 					s := m.([]metricMetadata)
 					return s[i].MetricFamily < s[j].MetricFamily
@@ -2120,7 +2120,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 			responseAsJSON: `{"go_threads": [{"type":"gauge","unit":"",
 "help":"Number of OS threads created"},{"type":"gauge","unit":"",
 "help":"Number of OS threads that were created."}]}`,
-			sorter: func(m interface{}) {
+			sorter: func(m any) {
 				v := m.(map[string][]metadata.Metadata)["go_threads"]
 
 				sort.Slice(v, func(i, j int) bool {
@@ -2328,7 +2328,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 				},
 			},
 			responseAsJSON: `{"go_threads": [{"type":"gauge","unit":"","help":"Number of OS threads created"},{"type":"gauge","unit":"","help":"Number of OS threads that were created."}]}`,
-			sorter: func(m interface{}) {
+			sorter: func(m any) {
 				v := m.(map[string][]metadata.Metadata)["go_threads"]
 
 				sort.Slice(v, func(i, j int) bool {
@@ -2382,7 +2382,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
 				},
 			},
 		},
-		zeroFunc: func(i interface{}) {
+		zeroFunc: func(i any) {
 			if i != nil {
 				v := i.(*AlertDiscovery)
 				for _, alert := range v.Alerts {
@@ -3769,20 +3769,20 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) {
 	}
 }
 
-func assertAPIResponse(t *testing.T, got, exp interface{}) {
+func assertAPIResponse(t *testing.T, got, exp any) {
 	t.Helper()
 
 	testutil.RequireEqual(t, exp, got)
 }
 
-func assertAPIResponseLength(t *testing.T, got interface{}, expLen int) {
+func assertAPIResponseLength(t *testing.T, got any, expLen int) {
 	t.Helper()
 
 	gotLen := reflect.ValueOf(got).Len()
 	require.Equal(t, expLen, gotLen, "Response length does not match")
 }
 
-func assertAPIResponseMetadataLen(t *testing.T, got interface{}, expLen int) {
+func assertAPIResponseMetadataLen(t *testing.T, got any, expLen int) {
 	t.Helper()
 
 	var gotLen int
@@ -3988,7 +3988,6 @@ func TestAdminEndpoints(t *testing.T) {
 			errType: errorUnavailable,
 		},
 	} {
-		tc := tc
 		t.Run("", func(t *testing.T) {
 			dir := t.TempDir()
 
@@ -4411,7 +4410,6 @@ func TestTSDBStatus(t *testing.T) {
 			errType: errorBadData,
 		},
 	} {
-		tc := tc
 		t.Run(strconv.Itoa(i), func(t *testing.T) {
 			api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer}
 			endpoint := tc.endpoint(api)
@@ -4467,11 +4465,11 @@ var testResponseWriter = httptest.ResponseRecorder{}
 
 func BenchmarkRespond(b *testing.B) {
 	points := []promql.FPoint{}
-	for i := 0; i < 10000; i++ {
+	for i := range 10000 {
 		points = append(points, promql.FPoint{F: float64(i * 1000000), T: int64(i)})
 	}
 	matrix := promql.Matrix{}
-	for i := 0; i < 1000; i++ {
+	for i := range 1000 {
 		matrix = append(matrix, promql.Series{
 			Metric: labels.FromStrings("__name__", fmt.Sprintf("series%v", i),
 				"label", fmt.Sprintf("series%v", i),
@@ -4480,7 +4478,7 @@ func BenchmarkRespond(b *testing.B) {
 		})
 	}
 	series := []labels.Labels{}
-	for i := 0; i < 1000; i++ {
+	for i := range 1000 {
 		series = append(series, labels.FromStrings("__name__", fmt.Sprintf("series%v", i),
 			"label", fmt.Sprintf("series%v", i),
 			"label2", fmt.Sprintf("series%v", i)))
@@ -4488,7 +4486,7 @@ func BenchmarkRespond(b *testing.B) {
 
 	cases := []struct {
 		name     string
-		response interface{}
+		response any
 	}{
 		{name: "10000 points no labels", response: &QueryData{
 			ResultType: parser.ValueTypeMatrix,
@@ -4642,7 +4640,7 @@ func (t *testCodec) CanEncode(*Response) bool {
 }
 
 func (t *testCodec) Encode(*Response) ([]byte, error) {
-	return []byte(fmt.Sprintf("response from %v codec", t.contentType)), nil
+	return fmt.Appendf(nil, "response from %v codec", t.contentType), nil
 }
 
 func TestExtractQueryOpts(t *testing.T) {
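One non-`any` change hides in the testCodec hunk above: `fmt.Appendf` (Go 1.19+) formats directly into a byte slice, avoiding the intermediate string plus copy that `[]byte(fmt.Sprintf(...))` costs. Both produce identical bytes:

    package main

    import "fmt"

    func main() {
        viaSprintf := []byte(fmt.Sprintf("response from %v codec", "json")) // builds a string, then copies it
        viaAppendf := fmt.Appendf(nil, "response from %v codec", "json")    // formats straight into a []byte
        fmt.Println(string(viaSprintf) == string(viaAppendf))               // true
    }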
diff --git a/web/api/v1/json_codec_test.go b/web/api/v1/json_codec_test.go
index 759dabd28e..f0a671d6d1 100644
--- a/web/api/v1/json_codec_test.go
+++ b/web/api/v1/json_codec_test.go
@@ -26,7 +26,7 @@ import (
 
 func TestJsonCodec_Encode(t *testing.T) {
 	cases := []struct {
-		response interface{}
+		response any
 		expected string
 	}{
 		{
diff --git a/web/api/v1/translate_ast.go b/web/api/v1/translate_ast.go
index afa11f16b9..b3f5eda212 100644
--- a/web/api/v1/translate_ast.go
+++ b/web/api/v1/translate_ast.go
@@ -24,14 +24,14 @@ import (
 // for the tree view in the UI.
 // TODO: Could it make sense to do this via the normal JSON marshalling methods? Maybe
 // too UI-specific though.
-func translateAST(node parser.Expr) interface{} {
+func translateAST(node parser.Expr) any {
 	if node == nil {
 		return nil
 	}
 
 	switch n := node.(type) {
 	case *parser.AggregateExpr:
-		return map[string]interface{}{
+		return map[string]any{
 			"type":    "aggregation",
 			"op":      n.Op.String(),
 			"expr":    translateAST(n.Expr),
@@ -40,9 +40,9 @@ func translateAST(node parser.Expr) interface{} {
 			"without": n.Without,
 		}
 	case *parser.BinaryExpr:
-		var matching interface{}
+		var matching any
 		if m := n.VectorMatching; m != nil {
-			matching = map[string]interface{}{
+			matching = map[string]any{
 				"card":    m.Card.String(),
 				"labels":  sanitizeList(m.MatchingLabels),
 				"on":      m.On,
@@ -50,7 +50,7 @@ func translateAST(node parser.Expr) interface{} {
 			}
 		}
 
-		return map[string]interface{}{
+		return map[string]any{
 			"type":     "binaryExpr",
 			"op":       n.Op.String(),
 			"lhs":      translateAST(n.LHS),
@@ -59,14 +59,14 @@ func translateAST(node parser.Expr) interface{} {
 			"bool":     n.ReturnBool,
 		}
 	case *parser.Call:
-		args := []interface{}{}
+		args := []any{}
 		for _, arg := range n.Args {
 			args = append(args, translateAST(arg))
 		}
 
-		return map[string]interface{}{
+		return map[string]any{
 			"type": "call",
-			"func": map[string]interface{}{
+			"func": map[string]any{
 				"name":     n.Func.Name,
 				"argTypes": n.Func.ArgTypes,
 				"variadic": n.Func.Variadic,
@@ -76,7 +76,7 @@ func translateAST(node parser.Expr) interface{} {
 		}
 	case *parser.MatrixSelector:
 		vs := n.VectorSelector.(*parser.VectorSelector)
-		return map[string]interface{}{
+		return map[string]any{
 			"type":       "matrixSelector",
 			"name":       vs.Name,
 			"range":      n.Range.Milliseconds(),
@@ -86,7 +86,7 @@ func translateAST(node parser.Expr) interface{} {
 			"startOrEnd": getStartOrEnd(vs.StartOrEnd),
 		}
 	case *parser.SubqueryExpr:
-		return map[string]interface{}{
+		return map[string]any{
 			"type":       "subquery",
 			"expr":       translateAST(n.Expr),
 			"range":      n.Range.Milliseconds(),
@@ -101,23 +101,23 @@ func translateAST(node parser.Expr) interface{} {
 			"val":  strconv.FormatFloat(n.Val, 'f', -1, 64),
 		}
 	case *parser.ParenExpr:
-		return map[string]interface{}{
+		return map[string]any{
 			"type": "parenExpr",
 			"expr": translateAST(n.Expr),
 		}
 	case *parser.StringLiteral:
-		return map[string]interface{}{
+		return map[string]any{
 			"type": "stringLiteral",
 			"val":  n.Val,
 		}
 	case *parser.UnaryExpr:
-		return map[string]interface{}{
+		return map[string]any{
 			"type": "unaryExpr",
 			"op":   n.Op.String(),
 			"expr": translateAST(n.Expr),
 		}
 	case *parser.VectorSelector:
-		return map[string]interface{}{
+		return map[string]any{
 			"type":          "vectorSelector",
 			"name":          n.Name,
 			"offset":        n.OriginalOffset.Milliseconds(),
@@ -136,10 +136,10 @@ func sanitizeList(l []string) []string {
 	return l
 }
 
-func translateMatchers(in []*labels.Matcher) interface{} {
-	out := []map[string]interface{}{}
+func translateMatchers(in []*labels.Matcher) any {
+	out := []map[string]any{}
 	for _, m := range in {
-		out = append(out, map[string]interface{}{
+		out = append(out, map[string]any{
 			"name":  m.Name,
 			"value": m.Value,
 			"type":  m.Type.String(),
@@ -148,7 +148,7 @@ func translateMatchers(in []*labels.Matcher) interface{} {
 	return out
 }
 
-func getStartOrEnd(startOrEnd parser.ItemType) interface{} {
+func getStartOrEnd(startOrEnd parser.ItemType) any {
 	if startOrEnd == 0 {
 		return nil
 	}
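The translateAST rewrite is the densest cluster of `map[string]interface{}` -> `map[string]any` conversions; the function's output is a nested `map[string]any` tree that marshals directly to the JSON the UI's tree view consumes. A rough, hand-written stand-in for such a node (illustrative only, not real parser output):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Shape mimics the "binaryExpr" case above: nested maps become
        // nested JSON objects with no intermediate struct types.
        node := map[string]any{
            "type": "binaryExpr",
            "op":   "+",
            "lhs":  map[string]any{"type": "numberLiteral", "val": "1"},
            "rhs":  map[string]any{"type": "numberLiteral", "val": "2"},
        }
        b, _ := json.MarshalIndent(node, "", "  ")
        fmt.Println(string(b))
    }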
diff --git a/web/federate_test.go b/web/federate_test.go
index 7bebf506de..97aa45edba 100644
--- a/web/federate_test.go
+++ b/web/federate_test.go
@@ -341,7 +341,7 @@ func TestFederationWithNativeHistograms(t *testing.T) {
 		NegativeBuckets: []int64{2, 2, -2, 0},
 	}
 	app := db.Appender(context.Background())
-	for i := 0; i < 6; i++ {
+	for i := range 6 {
 		l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i))
 		expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i))
 		var err error
diff --git a/web/web_test.go b/web/web_test.go
index 25145a3fc6..c52c3af90c 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -487,7 +487,7 @@ func TestHandleMultipleQuitRequests(t *testing.T) {
 
 	start := make(chan struct{})
 	var wg sync.WaitGroup
-	for i := 0; i < 3; i++ {
+	for range 3 {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()