Merge pull request #17092 from prometheus/beorn7/cleanup

Apply analyzer modernize to the whole codebase
Björn Rabenstein authored on 2025-08-28 00:42:33 +02:00; committed by GitHub
commit ba808d1736
Signature: no known key found for this signature in database (GPG Key ID: B5690EEEBB952194)
174 changed files with 749 additions and 833 deletions
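The changes below are mechanical rewrites suggested by the modernize analyzer: interface{} becomes its alias any, counting loops become range-over-int loops (Go 1.22), hand-rolled clamps become the built-in min (Go 1.21), sort.Slice becomes slices.Sort, manual map copies become maps.Copy, and the now-redundant tc := tc loop-variable copies are dropped. A minimal, illustrative sketch of the most common rewrite (function names here are invented, not taken from this diff):

package main

import "fmt"

// was: func logf(format string, a ...interface{})
// any is just the Go 1.18+ alias for interface{}, so behaviour is identical.
func logf(format string, a ...any) {
    fmt.Printf(format, a...)
}

func main() { logf("hello, %s\n", "world") }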

View File

@ -673,7 +673,7 @@ func main() {
// Set Go runtime parameters before we get too far into initialization.
updateGoGC(cfgFile, logger)
if cfg.maxprocsEnable {
l := func(format string, a ...interface{}) {
l := func(format string, a ...any) {
logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
}
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {

View File

@ -202,7 +202,6 @@ func TestSendAlerts(t *testing.T) {
}
for i, tc := range testCases {
tc := tc
t.Run(strconv.Itoa(i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
require.NotEmpty(t, tc.in, "sender called with 0 alert")
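The deleted tc := tc line above relies on Go 1.22 loop semantics: each iteration now gets its own copy of the loop variable, so the manual shadowing before t.Run is redundant. A hedged, self-contained sketch of why the copy is no longer needed (example values are invented):

package main

import "fmt"

func main() {
    cases := []string{"a", "b", "c"}
    done := make(chan struct{})
    for _, tc := range cases {
        go func() {
            // With Go 1.22+, this closure captures this iteration's tc,
            // not one shared variable, so no tc := tc copy is required.
            fmt.Println(tc)
            done <- struct{}{}
        }()
    }
    for range cases {
        <-done
    }
}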

View File

@ -53,7 +53,7 @@ func TestStartupInterrupt(t *testing.T) {
url := "http://localhost" + port + "/graph"
Loop:
for x := 0; x < 10; x++ {
for range 10 {
// error=nil means prometheus has started, so we can send the interrupt
// signal and wait for the graceful shutdown.
if _, err := http.Get(url); err == nil {
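Counting loops whose index is never used are rewritten as range-over-int loops, available since Go 1.22. A small standalone sketch (not from the diff):

package main

import "fmt"

func main() {
    // was: for x := 0; x < 10; x++ { ... } with x never used in the body
    for range 10 {
        fmt.Println("attempt")
    }
    // the index can still be bound when it is needed
    for i := range 3 {
        fmt.Println("retry", i)
    }
}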

View File

@ -70,7 +70,7 @@ func (p *queryLogTest) skip(t *testing.T) {
// waitForPrometheus waits for Prometheus to be ready.
func (p *queryLogTest) waitForPrometheus() error {
var err error
for x := 0; x < 20; x++ {
for range 20 {
var r *http.Response
if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
break

View File

@ -207,7 +207,7 @@ func calcClassicBucketStatistics(matrix model.Matrix) (*statistics, error) {
sortMatrix(matrix)
totalPop := 0
for timeIdx := 0; timeIdx < numSamples; timeIdx++ {
for timeIdx := range numSamples {
curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx)
if err != nil {
return stats, err

View File

@ -155,10 +155,7 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
var wg sync.WaitGroup
lbls := lbls
for len(lbls) > 0 {
l := 1000
if len(lbls) < 1000 {
l = len(lbls)
}
l := min(len(lbls), 1000)
batch := lbls[:l]
lbls = lbls[l:]
@ -200,7 +197,7 @@ func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount in
}
total := uint64(0)
for i := 0; i < scrapeCount; i++ {
for range scrapeCount {
app := b.storage.Appender(context.TODO())
ts += timeDelta
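The four-line clamp above becomes a call to the generic min built-in from Go 1.21. A rough illustration with invented slice and batch sizes:

package main

import "fmt"

func main() {
    lbls := make([]string, 2500)
    for len(lbls) > 0 {
        // was: l := 1000; if len(lbls) < 1000 { l = len(lbls) }
        l := min(len(lbls), 1000)
        batch := lbls[:l]
        lbls = lbls[l:]
        fmt.Println("batch of", len(batch))
    }
}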

View File

@ -22,6 +22,7 @@ import (
"math"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
@ -278,9 +279,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
for k := range alertEvalTimesMap {
alertEvalTimes = append(alertEvalTimes, k)
}
sort.Slice(alertEvalTimes, func(i, j int) bool {
return alertEvalTimes[i] < alertEvalTimes[j]
})
slices.Sort(alertEvalTimes)
// Current index in alertEvalTimes what we are looking at.
curr := 0
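sort.Slice with a plain less-than comparator is replaced by slices.Sort (Go 1.21), which sorts any slice of an ordered element type in place. Illustrative only, with made-up values:

package main

import (
    "fmt"
    "slices"
)

func main() {
    evalTimes := []int64{30, 10, 20}
    // was: sort.Slice(evalTimes, func(i, j int) bool { return evalTimes[i] < evalTimes[j] })
    slices.Sort(evalTimes)
    fmt.Println(evalTimes) // [10 20 30]
}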

View File

@ -367,7 +367,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultConfig
// We want to set c to the defaults and then overwrite it with the input.
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
@ -594,7 +594,7 @@ func (c *GlobalConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
// Create a clean global config as the previous one was already populated
// by the default due to the YAML parser behavior for empty blocks.
gc := &GlobalConfig{}
@ -630,11 +630,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return errors.New("global scrape timeout greater than scrape interval")
}
if gc.ScrapeTimeout == 0 {
if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval {
gc.ScrapeTimeout = gc.ScrapeInterval
} else {
gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout
}
gc.ScrapeTimeout = min(DefaultGlobalConfig.ScrapeTimeout, gc.ScrapeInterval)
}
if gc.EvaluationInterval == 0 {
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
@ -790,7 +786,7 @@ func (c *ScrapeConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultScrapeConfig
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
return err
@ -841,11 +837,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
}
if c.ScrapeTimeout == 0 {
if globalConfig.ScrapeTimeout > c.ScrapeInterval {
c.ScrapeTimeout = c.ScrapeInterval
} else {
c.ScrapeTimeout = globalConfig.ScrapeTimeout
}
c.ScrapeTimeout = min(globalConfig.ScrapeTimeout, c.ScrapeInterval)
}
if c.BodySizeLimit == 0 {
c.BodySizeLimit = globalConfig.BodySizeLimit
@ -970,7 +962,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
func (c *ScrapeConfig) MarshalYAML() (any, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
@ -1024,7 +1016,7 @@ type TSDBConfig struct {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (t *TSDBConfig) UnmarshalYAML(unmarshal func(any) error) error {
*t = TSDBConfig{}
type plain TSDBConfig
if err := unmarshal((*plain)(t)); err != nil {
@ -1046,7 +1038,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (t *TracingClientType) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (t *TracingClientType) UnmarshalYAML(unmarshal func(any) error) error {
*t = TracingClientType("")
type plain TracingClientType
if err := unmarshal((*plain)(t)); err != nil {
@ -1080,7 +1072,7 @@ func (t *TracingConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (t *TracingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (t *TracingConfig) UnmarshalYAML(unmarshal func(any) error) error {
*t = TracingConfig{
ClientType: TracingClientGRPC,
}
@ -1140,7 +1132,7 @@ func (c *AlertingConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(any) error) error {
// Create a clean global config as the previous one was already populated
// by the default due to the YAML parser behavior for empty blocks.
*c = AlertingConfig{}
@ -1175,7 +1167,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
type AlertmanagerAPIVersion string
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(any) error) error {
*v = AlertmanagerAPIVersion("")
type plain AlertmanagerAPIVersion
if err := unmarshal((*plain)(v)); err != nil {
@ -1234,7 +1226,7 @@ func (c *AlertmanagerConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultAlertmanagerConfig
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
return err
@ -1291,7 +1283,7 @@ func (c *AlertmanagerConfig) Validate(nameValidationScheme model.ValidationSchem
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) {
func (c *AlertmanagerConfig) MarshalYAML() (any, error) {
return discovery.MarshalYAMLWithInlineConfigs(c)
}
@ -1395,7 +1387,7 @@ func (c *RemoteWriteConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRemoteWriteConfig
type plain RemoteWriteConfig
if err := unmarshal((*plain)(c)); err != nil {
@ -1560,7 +1552,7 @@ func (c *RemoteReadConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRemoteReadConfig
type plain RemoteReadConfig
if err := unmarshal((*plain)(c)); err != nil {
@ -1620,7 +1612,7 @@ type OTLPConfig struct {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *OTLPConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultOTLPConfig
type plain OTLPConfig
if err := unmarshal((*plain)(c)); err != nil {

View File

@ -116,7 +116,7 @@ func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery
}
// UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultEC2SDConfig
type plain EC2SDConfig
err := unmarshal((*plain)(c))

View File

@ -98,7 +98,7 @@ func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (dis
}
// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultLightsailSDConfig
type plain LightsailSDConfig
err := unmarshal((*plain)(c))

View File

@ -138,7 +138,7 @@ func validateAuthParam(param, name string) error {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -142,7 +142,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -437,8 +437,8 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
}
func TestUnmarshalConfig(t *testing.T) {
unmarshal := func(d []byte) func(interface{}) error {
return func(o interface{}) error {
unmarshal := func(d []byte) func(any) error {
return func(o any) error {
return yaml.Unmarshal(d, o)
}
}

View File

@ -93,7 +93,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -108,7 +108,7 @@ func (c *Configs) SetDirectory(dir string) {
}
// UnmarshalYAML implements yaml.Unmarshaler.
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Configs) UnmarshalYAML(unmarshal func(any) error) error {
cfgTyp := reflect.StructOf(configFields)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()
@ -123,7 +123,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
func (c Configs) MarshalYAML() (interface{}, error) {
func (c Configs) MarshalYAML() (any, error) {
cfgTyp := reflect.StructOf(configFields)
cfgPtr := reflect.New(cfgTyp)
cfgVal := cfgPtr.Elem()

View File

@ -82,7 +82,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -251,7 +251,6 @@ func TestDNS(t *testing.T) {
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
@ -282,8 +281,8 @@ func TestSDConfigUnmarshalYAML(t *testing.T) {
return d
}
unmarshal := func(d []byte) func(interface{}) error {
return func(o interface{}) error {
unmarshal := func(d []byte) func(any) error {
return func(o any) error {
return yaml.Unmarshal(d, o)
}
}

View File

@ -97,7 +97,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"log/slog"
"maps"
"os"
"path/filepath"
"strings"
@ -78,7 +79,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
@ -120,9 +121,7 @@ func (t *TimestampCollector) Collect(ch chan<- prometheus.Metric) {
t.lock.RLock()
for fileSD := range t.discoverers {
fileSD.lock.RLock()
for filename, timestamp := range fileSD.timestamps {
uniqueFiles[filename] = timestamp
}
maps.Copy(uniqueFiles, fileSD.timestamps)
fileSD.lock.RUnlock()
}
t.lock.RUnlock()
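Copying one map into another key by key becomes maps.Copy (Go 1.21), which writes every key/value pair of src into dst. A minimal sketch with made-up filenames and timestamps:

package main

import (
    "fmt"
    "maps"
)

func main() {
    uniqueFiles := map[string]float64{"a.yml": 1}
    timestamps := map[string]float64{"b.yml": 2, "c.yml": 3}
    // was: for k, v := range timestamps { uniqueFiles[k] = v }
    maps.Copy(uniqueFiles, timestamps)
    fmt.Println(len(uniqueFiles)) // 3
}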

View File

@ -327,7 +327,6 @@ func TestInitialUpdate(t *testing.T) {
"fixtures/valid.yml",
"fixtures/valid.json",
} {
tc := tc
t.Run(tc, func(t *testing.T) {
t.Parallel()
@ -348,7 +347,6 @@ func TestInvalidFile(t *testing.T) {
"fixtures/invalid_nil.yml",
"fixtures/invalid_nil.json",
} {
tc := tc
t.Run(tc, func(t *testing.T) {
t.Parallel()

View File

@ -98,7 +98,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -99,7 +99,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
if err := unmarshal((*string)(c)); err != nil {
return err
}
@ -112,7 +112,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -78,7 +78,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -106,7 +106,7 @@ func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -83,15 +83,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
}
_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
epAddCount.Inc()
e.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
epUpdateCount.Inc()
e.enqueue(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
epDeleteCount.Inc()
e.enqueue(o)
},
@ -100,7 +100,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
l.Error("Error adding endpoints event handler.", "err", err)
}
serviceUpdate := func(o interface{}) {
serviceUpdate := func(o any) {
svc, err := convertToService(o)
if err != nil {
e.logger.Error("converting to Service object failed", "err", err)
@ -119,15 +119,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
// TODO(fabxc): potentially remove add and delete event handlers. Those should
// be triggered via the endpoint handlers already.
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
svcAddCount.Inc()
serviceUpdate(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
svcUpdateCount.Inc()
serviceUpdate(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
svcDeleteCount.Inc()
serviceUpdate(o)
},
@ -136,7 +136,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
l.Error("Error adding services event handler.", "err", err)
}
_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
UpdateFunc: func(old, cur any) {
podUpdateCount.Inc()
oldPod, ok := old.(*apiv1.Pod)
if !ok {
@ -160,15 +160,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
}
if e.withNodeMetadata {
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
node := o.(*apiv1.Node)
e.enqueueNode(node.Name)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
node := o.(*apiv1.Node)
e.enqueueNode(node.Name)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
nodeName, err := nodeName(o)
if err != nil {
l.Error("Error getting Node name", "err", err)
@ -183,7 +183,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
if e.withNamespaceMetadata {
_, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
namespace := o.(*apiv1.Namespace)
e.enqueueNamespace(namespace.Name)
},
@ -234,7 +234,7 @@ func (e *Endpoints) enqueuePod(podNamespacedName string) {
}
}
func (e *Endpoints) enqueue(obj interface{}) {
func (e *Endpoints) enqueue(obj any) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
return
@ -303,7 +303,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
return true
}
func convertToEndpoints(o interface{}) (*apiv1.Endpoints, error) {
func convertToEndpoints(o any) (*apiv1.Endpoints, error) {
endpoints, ok := o.(*apiv1.Endpoints)
if ok {
return endpoints, nil

View File

@ -83,15 +83,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
}
_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
epslAddCount.Inc()
e.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
epslUpdateCount.Inc()
e.enqueue(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
epslDeleteCount.Inc()
e.enqueue(o)
},
@ -100,7 +100,7 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
l.Error("Error adding endpoint slices event handler.", "err", err)
}
serviceUpdate := func(o interface{}) {
serviceUpdate := func(o any) {
svc, err := convertToService(o)
if err != nil {
e.logger.Error("converting to Service object failed", "err", err)
@ -118,15 +118,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
}
}
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
svcAddCount.Inc()
serviceUpdate(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
svcUpdateCount.Inc()
serviceUpdate(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
svcDeleteCount.Inc()
serviceUpdate(o)
},
@ -137,15 +137,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
if e.withNodeMetadata {
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
node := o.(*apiv1.Node)
e.enqueueNode(node.Name)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
node := o.(*apiv1.Node)
e.enqueueNode(node.Name)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
nodeName, err := nodeName(o)
if err != nil {
l.Error("Error getting Node name", "err", err)
@ -160,7 +160,7 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
if e.withNamespaceMetadata {
_, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
namespace := o.(*apiv1.Namespace)
e.enqueueNamespace(namespace.Name)
},
@ -199,7 +199,7 @@ func (e *EndpointSlice) enqueueNamespace(namespace string) {
}
}
func (e *EndpointSlice) enqueue(obj interface{}) {
func (e *EndpointSlice) enqueue(obj any) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
return

View File

@ -56,15 +56,15 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
}
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
ingressAddCount.Inc()
s.enqueue(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
ingressDeleteCount.Inc()
s.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
ingressUpdateCount.Inc()
s.enqueue(o)
},
@ -75,7 +75,7 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
if s.withNamespaceMetadata {
_, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
namespace := o.(*apiv1.Namespace)
s.enqueueNamespace(namespace.Name)
},
@ -90,7 +90,7 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
return s
}
func (i *Ingress) enqueue(obj interface{}) {
func (i *Ingress) enqueue(obj any) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
return

View File

@ -16,6 +16,7 @@ package kubernetes
import (
"context"
"fmt"
"maps"
"testing"
"github.com/prometheus/common/model"
@ -193,9 +194,7 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
expected := expectedTargetGroups("ns1", TLSNo)
for k, v := range expectedTargetGroups("ns2", TLSNo) {
expected[k] = v
}
maps.Copy(expected, expectedTargetGroups("ns2", TLSNo))
k8sDiscoveryTest{
discovery: n,
afterStart: func() {

View File

@ -80,7 +80,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
if err := unmarshal((*string)(c)); err != nil {
return err
}
@ -160,7 +160,7 @@ type AttachMetadataConfig struct {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
@ -234,7 +234,7 @@ type NamespaceDiscovery struct {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(any) error) error {
*c = NamespaceDiscovery{}
type plain NamespaceDiscovery
return unmarshal((*plain)(c))
@ -698,7 +698,7 @@ func (d *Discovery) newNamespaceInformer(ctx context.Context) cache.SharedInform
func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
indexers := make(map[string]cache.IndexFunc)
if d.attachMetadata.Node {
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
indexers[nodeIndex] = func(obj any) ([]string, error) {
pod, ok := obj.(*apiv1.Pod)
if !ok {
return nil, errors.New("object is not a pod")
@ -716,7 +716,7 @@ func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedInd
func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
indexers := make(map[string]cache.IndexFunc)
indexers[podIndex] = func(obj interface{}) ([]string, error) {
indexers[podIndex] = func(obj any) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
return nil, errors.New("object is not endpoints")
@ -733,7 +733,7 @@ func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.Shar
}
if d.attachMetadata.Node {
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
indexers[nodeIndex] = func(obj any) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
return nil, errors.New("object is not endpoints")
@ -766,7 +766,7 @@ func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.Shar
func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
indexers := make(map[string]cache.IndexFunc)
indexers[serviceIndex] = func(obj interface{}) ([]string, error) {
indexers[serviceIndex] = func(obj any) ([]string, error) {
e, ok := obj.(*disv1.EndpointSlice)
if !ok {
return nil, errors.New("object is not an endpointslice")
@ -781,7 +781,7 @@ func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, objec
}
if d.attachMetadata.Node {
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
indexers[nodeIndex] = func(obj any) ([]string, error) {
e, ok := obj.(*disv1.EndpointSlice)
if !ok {
return nil, errors.New("object is not an endpointslice")
@ -886,7 +886,7 @@ func namespacedName(namespace, name string) string {
// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
func nodeName(o interface{}) (string, error) {
func nodeName(o any) (string, error) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
if err != nil {
return "", err

View File

@ -302,7 +302,6 @@ func TestFailuresCountMetric(t *testing.T) {
}
for _, tc := range tests {
tc := tc
t.Run(string(tc.role), func(t *testing.T) {
t.Parallel()

View File

@ -62,15 +62,15 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
}
_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
nodeAddCount.Inc()
n.enqueue(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
nodeDeleteCount.Inc()
n.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
nodeUpdateCount.Inc()
n.enqueue(o)
},
@ -81,7 +81,7 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
return n
}
func (n *Node) enqueue(obj interface{}) {
func (n *Node) enqueue(obj any) {
key, err := nodeName(obj)
if err != nil {
return
@ -140,7 +140,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
return true
}
func convertToNode(o interface{}) (*apiv1.Node, error) {
func convertToNode(o any) (*apiv1.Node, error) {
node, ok := o.(*apiv1.Node)
if ok {
return node, nil

View File

@ -71,15 +71,15 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
queue: workqueue.NewNamed(RolePod.String()),
}
_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
podAddCount.Inc()
p.enqueue(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
podDeleteCount.Inc()
p.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
podUpdateCount.Inc()
p.enqueue(o)
},
@ -90,15 +90,15 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
if p.withNodeMetadata {
_, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
node := o.(*apiv1.Node)
p.enqueuePodsForNode(node.Name)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
node := o.(*apiv1.Node)
p.enqueuePodsForNode(node.Name)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
nodeName, err := nodeName(o)
if err != nil {
l.Error("Error getting Node name", "err", err)
@ -113,7 +113,7 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
if p.withNamespaceMetadata {
_, err = p.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
namespace := o.(*apiv1.Namespace)
p.enqueuePodsForNamespace(namespace.Name)
},
@ -128,7 +128,7 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
return p
}
func (p *Pod) enqueue(obj interface{}) {
func (p *Pod) enqueue(obj any) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
return
@ -195,7 +195,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
return true
}
func convertToPod(o interface{}) (*apiv1.Pod, error) {
func convertToPod(o any) (*apiv1.Pod, error) {
pod, ok := o.(*apiv1.Pod)
if ok {
return pod, nil

View File

@ -16,6 +16,7 @@ package kubernetes
import (
"context"
"fmt"
"maps"
"testing"
"github.com/prometheus/common/model"
@ -437,9 +438,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) {
n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
expected := expectedPodTargetGroups("ns1")
for k, v := range expectedPodTargetGroups("ns2") {
expected[k] = v
}
maps.Copy(expected, expectedPodTargetGroups("ns2"))
k8sDiscoveryTest{
discovery: n,
beforeRun: func() {

View File

@ -61,15 +61,15 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
}
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(o interface{}) {
AddFunc: func(o any) {
svcAddCount.Inc()
s.enqueue(o)
},
DeleteFunc: func(o interface{}) {
DeleteFunc: func(o any) {
svcDeleteCount.Inc()
s.enqueue(o)
},
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
svcUpdateCount.Inc()
s.enqueue(o)
},
@ -80,7 +80,7 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
if s.withNamespaceMetadata {
_, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, o interface{}) {
UpdateFunc: func(_, o any) {
namespace := o.(*apiv1.Namespace)
s.enqueueNamespace(namespace.Name)
},
@ -95,7 +95,7 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
return s
}
func (s *Service) enqueue(obj interface{}) {
func (s *Service) enqueue(obj any) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
return
@ -171,7 +171,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
return true
}
func convertToService(o interface{}) (*apiv1.Service, error) {
func convertToService(o any) (*apiv1.Service, error) {
service, ok := o.(*apiv1.Service)
if ok {
return service, nil

View File

@ -112,7 +112,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -17,6 +17,7 @@ import (
"context"
"fmt"
"log/slog"
"maps"
"reflect"
"sync"
"time"
@ -37,7 +38,7 @@ type poolKey struct {
type Provider struct {
name string
d Discoverer
config interface{}
config any
cancel context.CancelFunc
// done should be called after cleaning up resources associated with cancelled provider.
@ -62,7 +63,7 @@ func (p *Provider) IsStarted() bool {
return p.cancel != nil
}
func (p *Provider) Config() interface{} {
func (p *Provider) Config() any {
return p.config
}
@ -255,9 +256,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
}
if l := len(refTargets); l > 0 {
m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
for k, v := range refTargets {
m.targets[poolKey{s, prov.name}][k] = v
}
maps.Copy(m.targets[poolKey{s, prov.name}], refTargets)
}
}
m.targetsMtx.Unlock()

View File

@ -668,7 +668,6 @@ func TestTargetUpdatesOrder(t *testing.T) {
}
for i, tc := range testCases {
tc := tc
t.Run(tc.title, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@ -1350,7 +1349,6 @@ func TestCoordinationWithReceiver(t *testing.T) {
}
for _, tc := range testCases {
tc := tc
t.Run(tc.title, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@ -1471,7 +1469,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
wg.Add(2000)
start := make(chan struct{})
for i := 0; i < 1000; i++ {
for range 1000 {
go func() {
<-start
td.update([]*targetgroup.Group{
@ -1485,7 +1483,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
}()
}
for i := 0; i < 1000; i++ {
for i := range 1000 {
go func(i int) {
<-start
c := map[string]Configs{
@ -1545,7 +1543,7 @@ func (t *testDiscoverer) update(tgs []*targetgroup.Group) {
func TestUnregisterMetrics(t *testing.T) {
reg := prometheus.NewRegistry()
// Check that all metrics can be unregistered, allowing a second manager to be created.
for i := 0; i < 2; i++ {
for range 2 {
ctx, cancel := context.WithCancel(context.Background())
refreshMetrics, sdMetrics := NewTestMetrics(t, reg)

View File

@ -101,7 +101,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
@ -513,7 +513,7 @@ func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32
ports := make([]uint32, len(portMappings))
labels := make([]map[string]string, len(portMappings))
for i := 0; i < len(portMappings); i++ {
for i := range portMappings {
labels[i] = portMappings[i].Labels
if containerNet {

View File

@ -103,7 +103,7 @@ func (c *DockerSDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultDockerSDConfig
type plain DockerSDConfig
err := unmarshal((*plain)(c))

View File

@ -90,7 +90,7 @@ func (c *DockerSwarmSDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultDockerSwarmSDConfig
type plain DockerSwarmSDConfig
err := unmarshal((*plain)(c))

View File

@ -16,6 +16,7 @@ package moby
import (
"context"
"fmt"
"maps"
"net"
"strconv"
@ -81,13 +82,9 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
}
}
for k, v := range serviceLabels[s.ServiceID] {
commonLabels[k] = v
}
maps.Copy(commonLabels, serviceLabels[s.ServiceID])
for k, v := range nodeLabels[s.NodeID] {
commonLabels[k] = v
}
maps.Copy(commonLabels, nodeLabels[s.NodeID])
for _, p := range s.Status.PortStatus.Ports {
if p.Protocol != swarm.PortConfigProtocolTCP {

View File

@ -93,7 +93,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -17,6 +17,7 @@ import (
"context"
"fmt"
"log/slog"
"maps"
"net"
"strconv"
@ -206,7 +207,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v)
}
for pool, address := range s.Addresses {
md, ok := address.([]interface{})
md, ok := address.([]any)
if !ok {
i.logger.Warn("Invalid type for address, expected array")
continue
@ -216,7 +217,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
continue
}
for _, address := range md {
md1, ok := address.(map[string]interface{})
md1, ok := address.(map[string]any)
if !ok {
i.logger.Warn("Invalid type for address, expected dict")
continue
@ -230,9 +231,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
continue
}
lbls := make(model.LabelSet, len(labels))
for k, v := range labels {
lbls[k] = v
}
maps.Copy(lbls, labels)
lbls[openstackLabelAddressPool] = model.LabelValue(pool)
lbls[openstackLabelPrivateIP] = model.LabelValue(addr)
if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok {

View File

@ -103,7 +103,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
if err := unmarshal((*string)(c)); err != nil {
return err
}
@ -116,7 +116,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -66,7 +66,7 @@ func (SDConfig) Name() string {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -102,7 +102,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -34,7 +34,7 @@ type Resource struct {
Parameters Parameters `json:"parameters"`
}
type Parameters map[string]interface{}
type Parameters map[string]any
func (p *Parameters) toLabels() model.LabelSet {
labels := model.LabelSet{}
@ -52,7 +52,7 @@ func (p *Parameters) toLabels() model.LabelSet {
labelValue = strconv.FormatFloat(value, 'g', -1, 64)
case []string:
labelValue = separator + strings.Join(value, separator) + separator
case []interface{}:
case []any:
if len(value) == 0 {
continue
}
@ -72,7 +72,7 @@ func (p *Parameters) toLabels() model.LabelSet {
}
}
labelValue = strings.Join(values, separator)
case map[string]interface{}:
case map[string]any:
subParameter := Parameters(value)
prefix := strutil.SanitizeLabelName(k + "_")
for subk, subv := range subParameter.toLabels() {

View File

@ -110,7 +110,7 @@ func getConfigType(out reflect.Type) reflect.Type {
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
// that have a Configs field that should be inlined.
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
func UnmarshalYAMLWithInlineConfigs(out any, unmarshal func(any) error) error {
outVal := reflect.ValueOf(out)
if outVal.Kind() != reflect.Ptr {
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
@ -198,7 +198,7 @@ func readConfigs(structVal reflect.Value, startField int) (Configs, error) {
// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
// that have a Configs field that should be inlined.
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
func MarshalYAMLWithInlineConfigs(in any) (any, error) {
inVal := reflect.ValueOf(in)
for inVal.Kind() == reflect.Ptr {
inVal = inVal.Elem()

View File

@ -55,7 +55,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *role) UnmarshalYAML(unmarshal func(any) error) error {
if err := unmarshal((*string)(c)); err != nil {
return err
}
@ -125,7 +125,7 @@ func (c SDConfig) secretKeyForConfig() string {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -95,7 +95,7 @@ type refresher interface {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -20,14 +20,14 @@ type ServerListResponse struct {
}
type Server struct {
AvailabilityZone string `json:"availabilityZone"`
ID string `json:"id"`
Labels map[string]interface{} `json:"labels"`
MachineType string `json:"machineType"`
Name string `json:"name"`
Nics []ServerNetwork `json:"nics"`
PowerStatus string `json:"powerStatus"`
Status string `json:"status"`
AvailabilityZone string `json:"availabilityZone"`
ID string `json:"id"`
Labels map[string]any `json:"labels"`
MachineType string `json:"machineType"`
Name string `json:"name"`
Nics []ServerNetwork `json:"nics"`
PowerStatus string `json:"powerStatus"`
Status string `json:"status"`
}
// ServerNetwork Describes the object that matches servers to its networks.

View File

@ -37,7 +37,7 @@ func (tg Group) String() string {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (tg *Group) UnmarshalYAML(unmarshal func(any) error) error {
g := struct {
Targets []string `yaml:"targets"`
Labels model.LabelSet `yaml:"labels"`
@ -56,7 +56,7 @@ func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements the yaml.Marshaler interface.
func (tg Group) MarshalYAML() (interface{}, error) {
func (tg Group) MarshalYAML() (any, error) {
g := &struct {
Targets []string `yaml:"targets"`
Labels model.LabelSet `yaml:"labels,omitempty"`

View File

@ -93,7 +93,7 @@ func TestTargetGroupJSONMarshal(t *testing.T) {
}
func TestTargetGroupYamlMarshal(t *testing.T) {
marshal := func(g interface{}) []byte {
marshal := func(g any) []byte {
d, err := yaml.Marshal(g)
if err != nil {
panic(err)
@ -134,8 +134,8 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
}
func TestTargetGroupYamlUnmarshal(t *testing.T) {
unmarshal := func(d []byte) func(interface{}) error {
return func(o interface{}) error {
unmarshal := func(d []byte) func(any) error {
return func(o any) error {
return yaml.Unmarshal(d, o)
}
}

View File

@ -91,7 +91,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))

View File

@ -133,7 +133,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
err := unmarshal((*plain)(c))
@ -164,7 +164,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) {
var result string
err := rpcclient.Call("auth.login", []interface{}{user, pass, duration}, &result)
err := rpcclient.Call("auth.login", []any{user, pass, duration}, &result)
return result, err
}
@ -174,7 +174,7 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, enti
SystemGroups []systemGroupID `xmlrpc:"system_groups"`
}
err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []interface{}{token, entitlement}, &systemGroupsInfos)
err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []any{token, entitlement}, &systemGroupsInfos)
if err != nil {
return nil, err
}
@ -188,7 +188,7 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, enti
func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) {
var networkInfos []networkInfo
err := rpcclient.Call("system.getNetworkForSystems", []interface{}{token, systemIDs}, &networkInfos)
err := rpcclient.Call("system.getNetworkForSystems", []any{token, systemIDs}, &networkInfos)
if err != nil {
return nil, err
}
@ -208,7 +208,7 @@ func getEndpointInfoForSystems(
var endpointInfos []endpointInfo
err := rpcclient.Call(
"system.monitoring.listEndpoints",
[]interface{}{token, systemIDs}, &endpointInfos)
[]any{token, systemIDs}, &endpointInfos)
return endpointInfos, err
}

View File

@ -95,7 +95,7 @@ func (c *SDConfig) SetDirectory(dir string) {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultSDConfig
type plain SDConfig
if err := unmarshal((*plain)(c)); err != nil {

View File

@ -65,7 +65,7 @@ func (*KumaSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discove
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultKumaSDConfig
type plainKumaConf KumaSDConfig
err := unmarshal((*plainKumaConf)(c))

View File

@ -72,7 +72,7 @@ func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (dis
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultServersetSDConfig
type plain ServersetSDConfig
err := unmarshal((*plain)(c))
@ -114,7 +114,7 @@ func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discove
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultNerveSDConfig
type plain NerveSDConfig
err := unmarshal((*plain)(c))

View File

@ -3270,7 +3270,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) {
it = c.h.AllReverseBucketIterator()
length := len(c.expPositiveBuckets)
for j := 0; j < length; j++ {
for j := range length {
i := length - j - 1
b := c.expPositiveBuckets[i]
require.True(t, it.Next(), "all reverse bucket iterator exhausted too early")
@ -3286,7 +3286,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) {
require.False(t, it.Next(), "positive bucket iterator not exhausted")
it = c.h.PositiveReverseBucketIterator()
for j := 0; j < length; j++ {
for j := range length {
i := length - j - 1
b := c.expPositiveBuckets[i]
require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early")

View File

@ -402,7 +402,7 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
}
var last IBC
for i := 0; i < len(buckets); i++ {
for i := range buckets {
var c IBC
if deltas {
c = last + buckets[i]

View File

@ -22,7 +22,7 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide))
var histograms []*Histogram
for i := 0; i < numHistograms; i++ {
for i := range numHistograms {
h := &Histogram{
Count: uint64(i) + observationCount,
ZeroCount: uint64(i),
@ -35,13 +35,13 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
PositiveBuckets: make([]int64, bucketsPerSide),
}
for j := 0; j < numSpans; j++ {
for j := range numSpans {
s := Span{Offset: 1, Length: spanLength}
h.NegativeSpans[j] = s
h.PositiveSpans[j] = s
}
for j := 0; j < bucketsPerSide; j++ {
for j := range bucketsPerSide {
h.NegativeBuckets[j] = 1
h.PositiveBuckets[j] = 1
}

View File

@ -84,12 +84,12 @@ func (ls *Labels) UnmarshalJSON(b []byte) error {
}
// MarshalYAML implements yaml.Marshaler.
func (ls Labels) MarshalYAML() (interface{}, error) {
func (ls Labels) MarshalYAML() (any, error) {
return ls.Map(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler.
func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (ls *Labels) UnmarshalYAML(unmarshal func(any) error) error {
var m map[string]string
if err := unmarshal(&m); err != nil {

View File

@ -579,7 +579,7 @@ func TestLabels_DropReserved(t *testing.T) {
func ScratchBuilderForBenchmark() ScratchBuilder {
// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
b := NewScratchBuilder(256)
for i := 0; i < 256; i++ {
for i := range 256 {
b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i))
}
b.Labels()
@ -625,7 +625,7 @@ func FromStringsForBenchmark(ss ...string) Labels {
func BenchmarkLabels_Get(b *testing.B) {
maxLabels := 30
allLabels := make([]Label, maxLabels)
for i := 0; i < maxLabels; i++ {
for i := range maxLabels {
allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))}
}
for _, size := range []int{5, 10, maxLabels} {
@ -906,7 +906,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
name: "typical labels under 1KB",
lbls: func() Labels {
b := NewBuilder(EmptyLabels())
for i := 0; i < 10; i++ {
for i := range 10 {
// Label ~20B name, 50B value.
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
}
@ -917,7 +917,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
name: "bigger labels over 1KB",
lbls: func() Labels {
b := NewBuilder(EmptyLabels())
for i := 0; i < 10; i++ {
for i := range 10 {
// Label ~50B name, 50B value.
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
}

View File

@ -114,9 +114,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) {
testValues = append(testValues, generateRandomValues()...)
for _, r := range regexes {
r := r
for _, v := range testValues {
v := v
t.Run(readable(r)+` on "`+readable(v)+`"`, func(t *testing.T) {
t.Parallel()
m, err := NewFastRegexMatcher(r)
@ -245,7 +243,6 @@ func TestFindSetMatches(t *testing.T) {
// too many combinations
{"[a-z][a-z]", nil, false},
} {
c := c
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
@ -416,7 +413,6 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}},
{"f.?o", nil},
} {
c := c
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
@ -683,7 +679,7 @@ func randString(randGenerator *rand.Rand, length int) string {
func randStrings(randGenerator *rand.Rand, many, length int) []string {
out := make([]string, 0, many)
for i := 0; i < many; i++ {
for range many {
out = append(out, randString(randGenerator, length))
}
return out

View File

@ -69,7 +69,7 @@ const (
)
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (a *Action) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@ -105,7 +105,7 @@ type Config struct {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
*c = DefaultRelabelConfig
type plain Config
if err := unmarshal((*plain)(c)); err != nil {
@ -207,7 +207,7 @@ func MustNewRegexp(s string) Regexp {
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (re *Regexp) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@ -221,7 +221,7 @@ func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements the yaml.Marshaler interface.
func (re Regexp) MarshalYAML() (interface{}, error) {
func (re Regexp) MarshalYAML() (any, error) {
if re.String() != "" {
return re.String(), nil
}

View File

@ -436,10 +436,7 @@ func (p *OpenMetricsParser) nextToken() token {
}
func (p *OpenMetricsParser) parseError(exp string, got token) error {
e := p.l.i + 1
if len(p.l.b) < e {
e = len(p.l.b)
}
e := min(len(p.l.b), p.l.i+1)
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
}

View File

@ -291,10 +291,7 @@ func (p *PromParser) nextToken() token {
}
func (p *PromParser) parseError(exp string, got token) error {
e := p.l.i + 1
if len(p.l.b) < e {
e = len(p.l.b)
}
e := min(len(p.l.b), p.l.i+1)
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
}

View File

@ -36,7 +36,7 @@ import (
// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
var floatFormatBufPool = sync.Pool{
New: func() interface{} {
New: func() any {
// To contain at most 17 digits and additional syntax for a float64.
b := make([]byte, 0, 24)
return &b

View File

@ -130,7 +130,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
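Index loops that only walk a slice's indices become for i := range s; the behaviour is unchanged, the analyzer simply prefers the shorter form. A sketch with a hypothetical span type (not the one from this file):

package main

import "fmt"

type span struct{ offset, length int }

func main() {
    src := []span{{1, 2}, {3, 4}}
    dst := make([]span, len(src))
    // was: for i := 0; i < len(src); i++ { ... }
    for i := range src {
        dst[i] = span{offset: src[i].offset, length: src[i].length}
    }
    fmt.Println(dst)
}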
@ -183,7 +183,7 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram
func spansToSpansProto(s []histogram.Span) []BucketSpan {
spans := make([]BucketSpan, len(s))
for i := 0; i < len(s); i++ {
for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}

View File

@ -177,7 +177,7 @@ func TestMetricStreamingDecoder(t *testing.T) {
func TestMetricStreamingDecoder_LabelsCorruption(t *testing.T) {
lastScrapeSize := 0
var allPreviousLabels []labels.Labels
buffers := pool.New(128, 1024, 2, func(sz int) interface{} { return make([]byte, 0, sz) })
buffers := pool.New(128, 1024, 2, func(sz int) any { return make([]byte, 0, sz) })
builder := labels.NewScratchBuilder(0)
for _, labelsCount := range []int{1, 2, 3, 5, 8, 5, 3, 2, 1} {
// Get buffer from pool like in scrape.go
@ -230,7 +230,7 @@ func generateMetricFamilyText(labelsCount int) string {
randomName := fmt.Sprintf("metric_%d", rand.Intn(1000))
randomHelp := fmt.Sprintf("Test metric to demonstrate forced corruption %d.", rand.Intn(1000))
labels10 := ""
for i := 0; i < labelsCount; i++ {
for range labelsCount {
labels10 += generateLabels()
}
return fmt.Sprintf(`name: "%s"

View File

@ -142,7 +142,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
@ -200,7 +200,7 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan {
return nil
}
spans := make([]BucketSpan, len(s))
for i := 0; i < len(s); i++ {
for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}

View File

@ -43,24 +43,24 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
// These metrics will have data for all test time range
metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
for j := 0; j < 10; j++ {
for j := range 10 {
metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", strconv.Itoa(j)))
}
metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", "+Inf"))
for i := 0; i < 10; i++ {
for i := range 10 {
metrics = append(metrics, labels.FromStrings("__name__", "a_ten", "l", strconv.Itoa(i)))
metrics = append(metrics, labels.FromStrings("__name__", "b_ten", "l", strconv.Itoa(i)))
for j := 0; j < 10; j++ {
for j := range 10 {
metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", strconv.Itoa(j)))
}
metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", "+Inf"))
}
for i := 0; i < 100; i++ {
for i := range 100 {
metrics = append(metrics, labels.FromStrings("__name__", "a_hundred", "l", strconv.Itoa(i)))
metrics = append(metrics, labels.FromStrings("__name__", "b_hundred", "l", strconv.Itoa(i)))
for j := 0; j < 10; j++ {
for j := range 10 {
metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", strconv.Itoa(j)))
}
metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", "+Inf"))
@ -70,7 +70,7 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
// Number points for each different label value of "l" for the sparse series
pointsPerSparseSeries := numIntervals / 50
for s := 0; s < numIntervals; s++ {
for s := range numIntervals {
a := stor.Appender(context.Background())
ts := int64(s * interval)
for i, metric := range metrics {
@ -525,7 +525,7 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
// Generate http_server_request_duration_seconds_count metrics with instance and job labels, and http_status_code label.
// the classic target_info metrics is gauge type.
metrics := make([]labels.Labels, 0, infoSeriesNum+len(statusCodes))
for i := 0; i < infoSeriesNum; i++ {
for i := range infoSeriesNum {
clusterName := "us-east"
if i >= infoSeriesNum/2 {
clusterName = "eu-south"
@ -550,7 +550,7 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
// Append the generated metrics and samples to the storage.
refs := make([]storage.SeriesRef, len(metrics))
for i := 0; i < numIntervals; i++ {
for i := range numIntervals {
a := stor.Appender(context.Background())
ts := int64(i * interval)
for j, metric := range metrics[:infoSeriesNum] {
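The rewritten loops above use Go 1.22's range-over-integer form: for i := range n yields i = 0..n-1, and for range n simply repeats the body n times when the index is unused. A standalone sketch (the counts are arbitrary):

package main

import "fmt"

func main() {
    // Same as: for i := 0; i < 3; i++
    for i := range 3 {
        fmt.Println("index", i)
    }
    // Same as a counted loop whose index is never read.
    runs := 0
    for range 2 {
        runs++
    }
    fmt.Println("ran", runs, "times")
}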

View File

@ -633,7 +633,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
logger := slog.New(l)
f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append.
params := make(map[string]interface{}, 4)
params := make(map[string]any, 4)
params["query"] = q.q
if eq, ok := q.Statement().(*parser.EvalStmt); ok {
params["start"] = formatDate(eq.Start)
@ -650,7 +650,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
f = append(f, slog.Any("spanID", span.SpanContext().SpanID()))
}
if origin := ctx.Value(QueryOrigin{}); origin != nil {
for k, v := range origin.(map[string]interface{}) {
for k, v := range origin.(map[string]any) {
f = append(f, slog.Any(k, v))
}
}
@ -1082,7 +1082,7 @@ type evaluator struct {
}
// errorf causes a panic with the input formatted into an error.
func (ev *evaluator) errorf(format string, args ...interface{}) {
func (ev *evaluator) errorf(format string, args ...any) {
ev.error(fmt.Errorf(format, args...))
}
@ -1792,10 +1792,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
offset := durationMilliseconds(selVS.Offset)
selRange := durationMilliseconds(sel.Range)
stepRange := selRange
if stepRange > ev.interval {
stepRange = ev.interval
}
stepRange := min(selRange, ev.interval)
// Reuse objects across steps to save memory allocations.
var floats []FPoint
var histograms []HPoint
@ -3327,10 +3324,7 @@ seriesLoop:
var r float64
switch op {
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
k = int64(fParam)
if k > int64(len(inputMatrix)) {
k = int64(len(inputMatrix))
}
k = min(int64(fParam), int64(len(inputMatrix)))
if k < 1 {
if enh.Ts != ev.endTimestamp {
advanceRemainingSeries(enh.Ts, si+1)
@ -3697,7 +3691,7 @@ func changesMetricSchema(op parser.ItemType) bool {
}
// NewOriginContext returns a new context with data about the origin attached.
func NewOriginContext(ctx context.Context, data map[string]interface{}) context.Context {
func NewOriginContext(ctx context.Context, data map[string]any) context.Context {
return context.WithValue(ctx, QueryOrigin{}, data)
}
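In engine.go the variadic ...interface{} parameters and map[string]interface{} literals become ...any and map[string]any; the values flow through fmt and context exactly as before. A hedged sketch of the variadic forwarding pattern, using only the standard library:

package main

import "fmt"

// errorf mirrors the common pattern of forwarding format arguments to fmt.
func errorf(format string, args ...any) error {
    return fmt.Errorf(format, args...)
}

func main() {
    err := errorf("query failed after %d retries: %s", 3, "timeout")
    fmt.Println(err)
}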

View File

@ -94,7 +94,7 @@ func TestQueryConcurrency(t *testing.T) {
}
var wg sync.WaitGroup
for i := 0; i < maxConcurrency; i++ {
for range maxConcurrency {
q := engine.NewTestQuery(f)
wg.Add(1)
go func() {
@ -134,7 +134,7 @@ func TestQueryConcurrency(t *testing.T) {
}
// Terminate remaining queries.
for i := 0; i < maxConcurrency; i++ {
for range maxConcurrency {
block <- struct{}{}
}
@ -2193,7 +2193,7 @@ func TestQueryLogger_basic(t *testing.T) {
engine.SetQueryLogger(f1)
queryExec()
logLines := getLogLines(t, ql1File)
require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"})
require.Contains(t, logLines[0], "params", map[string]any{"query": "test statement"})
require.Len(t, logLines, 1)
l := len(logLines)
@ -2246,7 +2246,7 @@ func TestQueryLogger_fields(t *testing.T) {
engine.SetQueryLogger(f1)
ctx, cancelCtx := context.WithCancel(context.Background())
ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"})
ctx = promql.NewOriginContext(ctx, map[string]any{"foo": "bar"})
defer cancelCtx()
query := engine.NewTestQuery(func(ctx context.Context) error {
return contextDone(ctx, "test statement execution")
@ -2279,7 +2279,7 @@ func TestQueryLogger_error(t *testing.T) {
engine.SetQueryLogger(f1)
ctx, cancelCtx := context.WithCancel(context.Background())
ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"})
ctx = promql.NewOriginContext(ctx, map[string]any{"foo": "bar"})
defer cancelCtx()
testErr := errors.New("failure")
query := engine.NewTestQuery(func(context.Context) error {
@ -2291,7 +2291,7 @@ func TestQueryLogger_error(t *testing.T) {
logLines := getLogLines(t, ql1File)
require.Contains(t, logLines[0], "error", testErr)
require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"})
require.Contains(t, logLines[0], "params", map[string]any{"query": "test statement"})
}
func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
@ -3339,7 +3339,6 @@ metric 0 1 2
}
for _, c := range cases {
c := c
t.Run(c.name, func(t *testing.T) {
engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
storage := promqltest.LoadedStorage(t, load)
@ -3987,7 +3986,7 @@ func TestSubQueryHistogramsCopy(t *testing.T) {
testQuery := `rate({__name__="http_request_duration_seconds"}[3m])`
ctx := context.Background()
for i := 0; i < 100; i++ {
for range 100 {
queryable := promqltest.LoadedStorage(t, load)
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
@ -3998,7 +3997,7 @@ func TestSubQueryHistogramsCopy(t *testing.T) {
queryable.Close()
}
for i := 0; i < 100; i++ {
for range 100 {
queryable := promqltest.LoadedStorage(t, load)
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
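The deleted c := c line (and the similar tc := tc and group := group copies in later files) relied on pre-1.22 semantics, where one loop variable was shared across iterations; since Go 1.22 each iteration binds a fresh variable, so closures and goroutines capture their own copy. A small sketch of why the shadow copy is no longer needed:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    for _, name := range []string{"a", "b", "c"} {
        wg.Add(1)
        // No "name := name" copy required: each iteration has its own
        // "name", so every goroutine prints its own value.
        go func() {
            defer wg.Done()
            fmt.Println(name)
        }()
    }
    wg.Wait()
}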

View File

@ -230,10 +230,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
// First iteration to find out two things:
// - What's the smallest relevant schema?
// - Are all data points histograms?
minSchema := prev.Schema
if last.Schema < minSchema {
minSchema = last.Schema
}
minSchema := min(last.Schema, prev.Schema)
for _, currPoint := range points[1 : len(points)-1] {
curr := currPoint.H
if curr == nil {
@ -1893,11 +1890,11 @@ func (s vectorByValueHeap) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s *vectorByValueHeap) Push(x interface{}) {
func (s *vectorByValueHeap) Push(x any) {
*s = append(*s, *(x.(*Sample)))
}
func (s *vectorByValueHeap) Pop() interface{} {
func (s *vectorByValueHeap) Pop() any {
old := *s
n := len(old)
el := old[n-1]
@ -1923,11 +1920,11 @@ func (s vectorByReverseValueHeap) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s *vectorByReverseValueHeap) Push(x interface{}) {
func (s *vectorByReverseValueHeap) Push(x any) {
*s = append(*s, *(x.(*Sample)))
}
func (s *vectorByReverseValueHeap) Pop() interface{} {
func (s *vectorByReverseValueHeap) Pop() any {
old := *s
n := len(old)
el := old[n-1]
@ -1975,7 +1972,7 @@ func stringFromArg(e parser.Expr) string {
func stringSliceFromArgs(args parser.Expressions) []string {
tmp := make([]string, len(args))
for i := 0; i < len(args); i++ {
for i := range args {
tmp[i] = stringFromArg(args[i])
}
return tmp
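vectorByValueHeap and its reverse twin implement container/heap's heap.Interface, whose Push and Pop methods are declared with any in current Go releases, so only the spelling changes here. For reference, a self-contained min-heap using the same method set:

package main

import (
    "container/heap"
    "fmt"
)

// intHeap is a minimal min-heap of ints satisfying heap.Interface.
type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *intHeap) Push(x any) { *h = append(*h, x.(int)) }

func (h *intHeap) Pop() any {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    h := &intHeap{5, 2, 9}
    heap.Init(h)
    heap.Push(h, 1)
    fmt.Println(heap.Pop(h)) // 1
}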

View File

@ -50,7 +50,7 @@ func TestDeriv(t *testing.T) {
interval = 30 * 1000
// Introduce some timestamp jitter to test 0 slope case.
// https://github.com/prometheus/prometheus/issues/7180
for i = 0; i < 15; i++ {
for i = range int64(15) {
jitter := 12 * i % 2
a.Append(0, metric, start+interval*i+jitter, 1)
}

View File

@ -347,7 +347,7 @@ func (l *Lexer) acceptRun(valid string) {
// errorf returns an error token and terminates the scan by passing
// back a nil pointer that will be the next state, terminating l.NextItem.
func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
func (l *Lexer) errorf(format string, args ...any) stateFn {
*l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)}
l.scannedItem = true

View File

@ -34,7 +34,7 @@ import (
)
var parserPool = sync.Pool{
New: func() interface{} {
New: func() any {
return &parser{}
},
}
@ -62,7 +62,7 @@ type parser struct {
yyParser yyParserImpl
generatedParserResult interface{}
generatedParserResult any
parseErrors ParseErrors
}
@ -273,7 +273,7 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
}
// addParseErrf formats the error and appends it to the list of parsing errors.
func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) {
func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...any) {
p.addParseErr(positionRange, fmt.Errorf(format, args...))
}
@ -475,13 +475,13 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node, overread bool) (
}
// newMap is used when building the FloatHistogram from a map.
func (*parser) newMap() (ret map[string]interface{}) {
return map[string]interface{}{}
func (*parser) newMap() (ret map[string]any) {
return map[string]any{}
}
// mergeMaps is used to combine maps as they're used to later build the Float histogram.
// This will merge the right map into the left map.
func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
func (p *parser) mergeMaps(left, right *map[string]any) (ret *map[string]any) {
for key, value := range *right {
if _, ok := (*left)[key]; ok {
p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key)
@ -530,7 +530,7 @@ func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint6
}
// buildHistogramFromMap is used in the grammar to take the individual parts of the histogram and complete it.
func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram {
func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHistogram {
output := &histogram.FloatHistogram{}
val, ok := (*desc)["schema"]
@ -623,7 +623,7 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
return output
}
func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string,
func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]any, bucketsKey, offsetKey string,
) (buckets []float64, spans []histogram.Span) {
bucketCount := 0
val, ok := (*desc)[bucketsKey]
@ -896,7 +896,7 @@ func parseDuration(ds string) (time.Duration, error) {
// parseGenerated invokes the yacc generated parser.
// The generated parser gets the provided startSymbol injected into
// the lexer stream, based on which grammar will be used.
func (p *parser) parseGenerated(startSymbol ItemType) interface{} {
func (p *parser) parseGenerated(startSymbol ItemType) any {
p.InjectItem(startSymbol)
p.yyParser.Parse(p)
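The parserPool literal matches sync.Pool's New field, which is declared as func() any; callers still type-assert the value returned by Get. A minimal sketch with a byte-buffer pool (the buffer size is an arbitrary example, not the parser's):

package main

import (
    "fmt"
    "sync"
)

var bufPool = sync.Pool{
    // New returns any; callers type-assert back to the concrete type.
    New: func() any {
        b := make([]byte, 0, 64)
        return &b
    },
}

func main() {
    b := bufPool.Get().(*[]byte)
    *b = append((*b)[:0], "hello"...)
    fmt.Println(string(*b))
    bufPool.Put(b)
}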

View File

@ -32,7 +32,7 @@ import (
)
func repeatError(query string, err error, start, startStep, end, endStep, count int) (errs ParseErrors) {
for i := 0; i < count; i++ {
for i := range count {
errs = append(errs, ParseErr{
PositionRange: posrange.PositionRange{
Start: posrange.Pos(start + (i * startStep)),

View File

@ -61,12 +61,11 @@ func TestConcurrentRangeQueries(t *testing.T) {
// Limit the number of queries running at the same time.
const numConcurrent = 4
sem := make(chan struct{}, numConcurrent)
for i := 0; i < numConcurrent; i++ {
for range numConcurrent {
sem <- struct{}{}
}
var g errgroup.Group
for _, c := range cases {
c := c
if strings.Contains(c.expr, "count_values") && c.steps > 10 {
continue // This test is too big to run with -race.
}

View File

@ -219,7 +219,7 @@ func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) }
//go:embed testdata
var testsFs embed.FS
func raise(line int, format string, v ...interface{}) error {
func raise(line int, format string, v ...any) error {
return &parser.ParseErr{
LineOffset: line,
Err: fmt.Errorf(format, v...),
@ -1527,7 +1527,7 @@ func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
func (ll *LazyLoader) parse(input string) error {
lines := getLines(input)
// Accepts only 'load' command.
for i := 0; i < len(lines); i++ {
for i := range lines {
l := lines[i]
if len(l) == 0 {
continue

View File

@ -195,7 +195,7 @@ func newJSONEntry(query string, logger *slog.Logger) []byte {
}
func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) {
for i := 0; i < maxConcurrent; i++ {
for i := range maxConcurrent {
tracker.getNextIndex <- 1 + (i * entrySize)
}
}

View File

@ -48,7 +48,7 @@ func TestQueryLogging(t *testing.T) {
}
// Check for inserts of queries.
for i := 0; i < 4; i++ {
for i := range 4 {
start := 1 + i*entrySize
end := start + entrySize
@ -60,7 +60,7 @@ func TestQueryLogging(t *testing.T) {
}
// Check if all queries have been deleted.
for i := 0; i < 4; i++ {
for i := range 4 {
queryLogger.Delete(1 + i*entrySize)
}
require.True(t, regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1:1+entrySize*4]),
@ -94,7 +94,7 @@ func TestIndexReuse(t *testing.T) {
}
// Check all bytes and verify new query was inserted at index 2
for i := 0; i < 3; i++ {
for i := range 3 {
start := 1 + i*entrySize
end := start + entrySize

View File

@ -45,7 +45,7 @@ func (s String) String() string {
}
func (s String) MarshalJSON() ([]byte, error) {
return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V})
return json.Marshal([...]any{float64(s.T) / 1000, s.V})
}
// Scalar is a data point that's explicitly not associated with a metric.
@ -61,7 +61,7 @@ func (s Scalar) String() string {
func (s Scalar) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(s.V, 'f', -1, 64)
return json.Marshal([...]interface{}{float64(s.T) / 1000, v})
return json.Marshal([...]any{float64(s.T) / 1000, v})
}
// Series is a stream of data points belonging to a metric.
@ -111,7 +111,7 @@ func (p FPoint) String() string {
// timestamp.
func (p FPoint) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(p.F, 'f', -1, 64)
return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
return json.Marshal([...]any{float64(p.T) / 1000, v})
}
// HPoint represents a single histogram data point for a given timestamp.
@ -136,9 +136,9 @@ func (p HPoint) String() string {
// timestamp.
func (p HPoint) MarshalJSON() ([]byte, error) {
h := struct {
Count string `json:"count"`
Sum string `json:"sum"`
Buckets [][]interface{} `json:"buckets,omitempty"`
Count string `json:"count"`
Sum string `json:"sum"`
Buckets [][]any `json:"buckets,omitempty"`
}{
Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64),
Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64),
@ -161,7 +161,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
boundaries = 0 // Inclusive only on upper end AKA left open.
}
}
bucketToMarshal := []interface{}{
bucketToMarshal := []any{
boundaries,
strconv.FormatFloat(bucket.Lower, 'f', -1, 64),
strconv.FormatFloat(bucket.Upper, 'f', -1, 64),
@ -169,7 +169,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
}
h.Buckets = append(h.Buckets, bucketToMarshal)
}
return json.Marshal([...]interface{}{float64(p.T) / 1000, h})
return json.Marshal([...]any{float64(p.T) / 1000, h})
}
// size returns the size of the HPoint compared to the size of an FPoint.
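These MarshalJSON methods keep emitting the [timestamp, value] tuples of the HTTP API; a fixed-size [...]any array marshals to a JSON array with mixed element types just as [...]interface{} did. A tiny sketch with a made-up sample:

package main

import (
    "encoding/json"
    "fmt"
    "strconv"
)

func main() {
    t := int64(1700000000123)
    v := 0.5
    // Encodes as a two-element JSON array: [1700000000.123,"0.5"]
    b, _ := json.Marshal([...]any{float64(t) / 1000, strconv.FormatFloat(v, 'f', -1, 64)})
    fmt.Println(string(b))
}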

View File

@ -596,10 +596,7 @@ func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay
if alert.needsSending(ts, resendDelay) {
alert.LastSentAt = ts
// Allow for two Eval or Alertmanager send failures.
delta := resendDelay
if interval > resendDelay {
delta = interval
}
delta := max(interval, resendDelay)
alert.ValidUntil = ts.Add(4 * delta)
anew := *alert
// The notifier re-uses the labels slice, hence make a copy.
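Here the conditional assignment collapses into the max builtin (Go 1.21); the resend window is still four times the larger of the evaluation interval and the resend delay. A sketch of the pattern with placeholder durations:

package main

import (
    "fmt"
    "time"
)

func main() {
    interval := 30 * time.Second
    resendDelay := 1 * time.Minute
    // Equivalent to: delta := resendDelay; if interval > resendDelay { delta = interval }
    delta := max(interval, resendDelay)
    fmt.Println("valid for", 4*delta)
}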

View File

@ -17,6 +17,7 @@ import (
"context"
"errors"
"log/slog"
"maps"
"math"
"slices"
"strings"
@ -215,7 +216,7 @@ func (g *Group) run(ctx context.Context) {
return
}
ctx = promql.NewOriginContext(ctx, map[string]interface{}{
ctx = promql.NewOriginContext(ctx, map[string]any{
"ruleGroup": map[string]string{
"file": g.File(),
"name": g.Name(),
@ -482,9 +483,7 @@ func (g *Group) CopyState(from *Group) {
continue
}
for fp, a := range far.active {
ar.active[fp] = a
}
maps.Copy(ar.active, far.active)
}
// Handle deleted and unmatched duplicate rules.
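maps.Copy from the standard maps package (Go 1.21) replaces the manual key-by-key copy; entries from the source overwrite same-keyed entries in the destination, exactly like the loop it replaces. A minimal sketch:

package main

import (
    "fmt"
    "maps"
)

func main() {
    dst := map[string]string{"job": "node"}
    src := map[string]string{"instance": "localhost:9100", "job": "node_exporter"}
    // Copies every key/value from src into dst, overwriting duplicates.
    maps.Copy(dst, src)
    fmt.Println(dst["job"], dst["instance"])
}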

View File

@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"log/slog"
maps0 "maps"
"net/url"
"path/filepath"
"slices"
@ -582,9 +583,7 @@ func FromMaps(maps ...map[string]string) labels.Labels {
mLables := make(map[string]string)
for _, m := range maps {
for k, v := range m {
mLables[k] = v
}
maps0.Copy(mLables, m)
}
return labels.FromMap(mLables)

View File

@ -17,6 +17,7 @@ import (
"context"
"fmt"
"io/fs"
"maps"
"math"
"os"
"path"
@ -821,9 +822,7 @@ func TestUpdate(t *testing.T) {
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil)
require.NoError(t, err)
for h, g := range ruleManager.groups {
ogs[h] = g
}
maps.Copy(ogs, ruleManager.groups)
// Update interval and reload.
for i, g := range rgs.Groups {
@ -2480,8 +2479,6 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
// Evaluate groups concurrently (like they normally do).
var wg sync.WaitGroup
for _, group := range groups {
group := group
wg.Add(1)
go func() {
group.Eval(ctx, time.Now())
@ -2532,7 +2529,7 @@ func TestGroup_Eval_RaceConditionOnStoppingGroupEvaluationWhileRulesAreEvaluated
<-ruleManager.block
// Update the group a decent number of times to simulate start and stopping in the middle of an evaluation.
for i := 0; i < 10; i++ {
for range 10 {
err := ruleManager.Update(time.Second, files, labels.EmptyLabels(), "", nil)
require.NoError(t, err)

View File

@ -62,7 +62,7 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
graceShut: make(chan struct{}),
triggerReload: make(chan struct{}, 1),
metrics: sm,
buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }),
buffers: pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }),
}
m.metrics.setTargetMetadataCacheGatherer(m)

View File

@ -594,7 +594,7 @@ func TestManagerTargetsUpdates(t *testing.T) {
defer m.Stop()
tgSent := make(map[string][]*targetgroup.Group)
for x := 0; x < 10; x++ {
for x := range 10 {
tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
{
Source: strconv.Itoa(x),
@ -1056,7 +1056,7 @@ scrape_configs:
func TestUnregisterMetrics(t *testing.T) {
reg := prometheus.NewRegistry()
// Check that all metrics can be unregistered, allowing a second manager to be created.
for i := 0; i < 2; i++ {
for range 2 {
opts := Options{}
manager, err := NewManager(&opts, nil, nil, nil, reg)
require.NotNil(t, manager)

View File

@ -1262,7 +1262,7 @@ func newScrapeLoop(ctx context.Context,
l = promslog.NewNopLogger()
}
if buffers == nil {
buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
}
if cache == nil {
cache = newScrapeCache(metrics)

View File

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math"
"net/http"
"net/http/httptest"
@ -459,7 +460,7 @@ func TestScrapePoolStop(t *testing.T) {
// clean them and the respective targets up. It must wait until each loop's
// stop function returned before returning itself.
for i := 0; i < numTargets; i++ {
for i := range numTargets {
t := &Target{
labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
scrapeConfig: &config.ScrapeConfig{},
@ -547,7 +548,7 @@ func TestScrapePoolReload(t *testing.T) {
// loops and start new ones. A new loop must not be started before the preceding
// one terminated.
for i := 0; i < numTargets; i++ {
for i := range numTargets {
labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
t := &Target{
labels: labels,
@ -569,9 +570,7 @@ func TestScrapePoolReload(t *testing.T) {
done := make(chan struct{})
beforeTargets := map[uint64]*Target{}
for h, t := range sp.activeTargets {
beforeTargets[h] = t
}
maps.Copy(beforeTargets, sp.activeTargets)
reloadTime := time.Now()
@ -691,7 +690,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
}
tgs := []*targetgroup.Group{}
for i := 0; i < 50; i++ {
for i := range 50 {
tgs = append(tgs,
&targetgroup.Group{
Targets: []model.LabelSet{
@ -904,7 +903,7 @@ func TestScrapePoolRaces(t *testing.T) {
require.Len(t, active, expectedActive, "Invalid number of active targets")
require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets")
for i := 0; i < 20; i++ {
for range 20 {
time.Sleep(10 * time.Millisecond)
sp.reload(newConfig())
}
@ -1437,7 +1436,7 @@ func makeTestGauges(n int) []byte {
sb := bytes.Buffer{}
fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
fmt.Fprintf(&sb, "# HELP metric_a help text\n")
for i := 0; i < n; i++ {
for i := range n {
fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
}
fmt.Fprintf(&sb, "# EOF\n")
@ -1817,7 +1816,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
numScrapes++
if numScrapes < 5 {
s := ""
for i := 0; i < 500; i++ {
for i := range 500 {
s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
}
w.Write([]byte(s + "&"))
@ -1929,7 +1928,7 @@ func TestScrapeLoopAppend(t *testing.T) {
}
}
func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) {
t.Helper()
testutil.RequireEqualWithOptions(t, expected, actual,
[]cmp.Option{cmp.Comparer(equalFloatSamples), cmp.AllowUnexported(histogramSample{})},
@ -3894,7 +3893,7 @@ func TestReuseCacheRace(t *testing.T) {
MetricNameValidationScheme: model.UTF8Validation,
MetricNameEscapingScheme: model.AllowUTF8,
}
buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
buffers = pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) })
sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
t1 = &Target{
labels: labels.FromStrings("labelNew", "nameNew"),
@ -4357,7 +4356,7 @@ func TestConvertClassicHistogramsToNHCB(t *testing.T) {
`, name, value)
}
genTestHistText := func(name string, withMetadata bool) string {
data := map[string]interface{}{
data := map[string]any{
"name": name,
}
b := &bytes.Buffer{}
@ -5145,7 +5144,7 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
{metricsCount: 100000},
}
for i := 0; i < len(scenarios); i++ {
for i := range scenarios {
var buf bytes.Buffer
var name string
gw := gzip.NewWriter(&buf)
@ -5266,7 +5265,6 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) {
},
}
for name, tc := range testcases {
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema)

View File

@ -489,7 +489,7 @@ scrape_configs:
for _, nTargets := range []int{1, 10, 100} {
b.Run(fmt.Sprintf("%d_targets", nTargets), func(b *testing.B) {
targets := []model.LabelSet{}
for i := 0; i < nTargets; i++ {
for i := range nTargets {
labels := model.LabelSet{
model.AddressLabel: model.LabelValue(fmt.Sprintf("localhost:%d", i)),
"__meta_kubernetes_namespace": "some_namespace",
@ -501,7 +501,7 @@ scrape_configs:
"__meta_kubernetes_pod_phase": "Running",
}
// Add some more labels, because Kubernetes SD generates a lot
for i := 0; i < 10; i++ {
for i := range 10 {
labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_label_extra%d", i))] = "a_label_abcdefgh"
labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_labelpresent_extra%d", i))] = "true"
}

View File

@ -233,10 +233,7 @@ func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *Label
}
func mergeStrings(a, b []string) []string {
maxl := len(a)
if len(b) > len(a) {
maxl = len(b)
}
maxl := max(len(b), len(a))
res := make([]string, 0, maxl*10/9)
for len(a) > 0 && len(b) > 0 {
@ -440,11 +437,11 @@ func (h genericSeriesSetHeap) Less(i, j int) bool {
return labels.Compare(a, b) < 0
}
func (h *genericSeriesSetHeap) Push(x interface{}) {
func (h *genericSeriesSetHeap) Push(x any) {
*h = append(*h, x.(genericSeriesSet))
}
func (h *genericSeriesSetHeap) Pop() interface{} {
func (h *genericSeriesSetHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
@ -698,11 +695,11 @@ func (h samplesIteratorHeap) Less(i, j int) bool {
return h[i].AtT() < h[j].AtT()
}
func (h *samplesIteratorHeap) Push(x interface{}) {
func (h *samplesIteratorHeap) Push(x any) {
*h = append(*h, x.(chunkenc.Iterator))
}
func (h *samplesIteratorHeap) Pop() interface{} {
func (h *samplesIteratorHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
@ -846,11 +843,11 @@ func (h chunkIteratorHeap) Less(i, j int) bool {
return at.MinTime < bt.MinTime
}
func (h *chunkIteratorHeap) Push(x interface{}) {
func (h *chunkIteratorHeap) Push(x any) {
*h = append(*h, x.(chunks.Iterator))
}
func (h *chunkIteratorHeap) Pop() interface{} {
func (h *chunkIteratorHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]

View File

@ -1329,10 +1329,10 @@ func TestChainSampleIteratorSeekHistogramCounterResetHint(t *testing.T) {
func makeSeries(numSeries, numSamples int) []Series {
series := []Series{}
for j := 0; j < numSeries; j++ {
for j := range numSeries {
labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
samples := []chunks.Sample{}
for k := 0; k < numSamples; k++ {
for k := range numSamples {
samples = append(samples, fSample{t: int64(k), f: float64(k)})
}
series = append(series, NewListSeries(labels, samples))
@ -1393,9 +1393,9 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
func BenchmarkMergeLabelValuesWithLimit(b *testing.B) {
var queriers []genericQuerier
for i := 0; i < 5; i++ {
for i := range 5 {
var lbls []string
for j := 0; j < 100000; j++ {
for j := range 100000 {
lbls = append(lbls, fmt.Sprintf("querier_%d_label_%d", i, j))
}
q := &mockQuerier{resp: lbls}
@ -1680,7 +1680,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}
// Check slice but ignore difference between nil and empty.
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) {
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...any) {
if len(a) == 0 {
require.Empty(t, b, msgAndArgs...)
} else {

View File

@ -215,7 +215,7 @@ func (c *AzureADConfig) Validate() error {
}
// UnmarshalYAML unmarshal the Azure AD config yaml.
func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
func (c *AzureADConfig) UnmarshalYAML(unmarshal func(any) error) error {
type plain AzureADConfig
*c = AzureADConfig{}
if err := unmarshal((*plain)(c)); err != nil {

View File

@ -1017,7 +1017,7 @@ func createSampledResponseHandler(t *testing.T, queries []*prompb.Query) http.Ha
var timeseries []*prompb.TimeSeries
// Create 2 series per query
for seriesIndex := 0; seriesIndex < 2; seriesIndex++ {
for seriesIndex := range 2 {
var labels []prompb.Label
if queryIndex == 0 {
labels = []prompb.Label{

View File

@ -537,7 +537,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
require.Equal(t, expected, fh)
// Keep calling Next() until the end.
for i := 0; i < 3; i++ {
for range 3 {
require.Equal(t, chunkenc.ValHistogram, it.Next())
}
@ -1025,7 +1025,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk {
time := startTime
for i := 0; i < numTestChunks; i++ {
for i := range numTestChunks {
c := chunkenc.NewXORChunk()
a, err := c.Appender()
@ -1033,7 +1033,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk {
minTimeMs := time
for j := 0; j < numSamplesPerTestChunk; j++ {
for j := range numSamplesPerTestChunk {
a.Append(time, float64(i+j))
time += int64(1000)
}

View File

@ -151,7 +151,7 @@ func TestDialContextWithRandomConnections(t *testing.T) {
t.Run(name, func(t *testing.T) {
dc := tc.setup()
require.NotNil(t, dc)
for i := 0; i < numberOfRuns; i++ {
for range numberOfRuns {
_, err := dc.dialContextFn()(context.Background(), testNetwork, tc.addr)
require.NoError(t, err)
}

View File

@ -75,7 +75,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
require.True(t, ok)
require.Equal(t, int64(1), interned.refs.Load(), "wrong interned refs count")
for i := 0; i < 1000; i++ {
for range 1000 {
released := make(chan struct{})
go func() {
interner.release(testString)

Some files were not shown because too many files have changed in this diff.