Apply analyzer "modernize" to the whole codebase
See https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize for details. This ran into a few issues (arguably bugs in the modernize tool), which I will fix in the next commit, so that we have transparency about what was done automatically. Beyond those hiccups, I believe all the changes applied are legitimate. Even where there might be no tangible direct gain, I would argue it's still better to use the "modern" way to avoid micro discussions in tiny style PRs later. Signed-off-by: beorn7 <beorn@grafana.com>
This commit is contained in:
parent
9cbb3a66c9
commit
747c5ee2b1
|
@ -673,7 +673,7 @@ func main() {
|
||||||
// Set Go runtime parameters before we get too far into initialization.
|
// Set Go runtime parameters before we get too far into initialization.
|
||||||
updateGoGC(cfgFile, logger)
|
updateGoGC(cfgFile, logger)
|
||||||
if cfg.maxprocsEnable {
|
if cfg.maxprocsEnable {
|
||||||
l := func(format string, a ...interface{}) {
|
l := func(format string, a ...any) {
|
||||||
logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
|
logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
|
||||||
}
|
}
|
||||||
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
|
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
|
||||||
|
|
|
@ -202,7 +202,6 @@ func TestSendAlerts(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, tc := range testCases {
|
for i, tc := range testCases {
|
||||||
tc := tc
|
|
||||||
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
t.Run(strconv.Itoa(i), func(t *testing.T) {
|
||||||
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
|
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
|
||||||
require.NotEmpty(t, tc.in, "sender called with 0 alert")
|
require.NotEmpty(t, tc.in, "sender called with 0 alert")
|
||||||
|
|
|
@ -53,7 +53,7 @@ func TestStartupInterrupt(t *testing.T) {
|
||||||
url := "http://localhost" + port + "/graph"
|
url := "http://localhost" + port + "/graph"
|
||||||
|
|
||||||
Loop:
|
Loop:
|
||||||
for x := 0; x < 10; x++ {
|
for range 10 {
|
||||||
// error=nil means prometheus has started, so we can send the interrupt
|
// error=nil means prometheus has started, so we can send the interrupt
|
||||||
// signal and wait for the graceful shutdown.
|
// signal and wait for the graceful shutdown.
|
||||||
if _, err := http.Get(url); err == nil {
|
if _, err := http.Get(url); err == nil {
|
||||||
|
|
|
@ -70,7 +70,7 @@ func (p *queryLogTest) skip(t *testing.T) {
|
||||||
// waitForPrometheus waits for Prometheus to be ready.
|
// waitForPrometheus waits for Prometheus to be ready.
|
||||||
func (p *queryLogTest) waitForPrometheus() error {
|
func (p *queryLogTest) waitForPrometheus() error {
|
||||||
var err error
|
var err error
|
||||||
for x := 0; x < 20; x++ {
|
for range 20 {
|
||||||
var r *http.Response
|
var r *http.Response
|
||||||
if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
|
if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
|
||||||
break
|
break
|
||||||
|
|
|
@ -207,7 +207,7 @@ func calcClassicBucketStatistics(matrix model.Matrix) (*statistics, error) {
|
||||||
sortMatrix(matrix)
|
sortMatrix(matrix)
|
||||||
|
|
||||||
totalPop := 0
|
totalPop := 0
|
||||||
for timeIdx := 0; timeIdx < numSamples; timeIdx++ {
|
for timeIdx := range numSamples {
|
||||||
curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx)
|
curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return stats, err
|
return stats, err
|
||||||
|
|
|
@ -155,10 +155,7 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
lbls := lbls
|
lbls := lbls
|
||||||
for len(lbls) > 0 {
|
for len(lbls) > 0 {
|
||||||
l := 1000
|
l := min(len(lbls), 1000)
|
||||||
if len(lbls) < 1000 {
|
|
||||||
l = len(lbls)
|
|
||||||
}
|
|
||||||
batch := lbls[:l]
|
batch := lbls[:l]
|
||||||
lbls = lbls[l:]
|
lbls = lbls[l:]
|
||||||
|
|
||||||
|
@ -200,7 +197,7 @@ func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount in
|
||||||
}
|
}
|
||||||
total := uint64(0)
|
total := uint64(0)
|
||||||
|
|
||||||
for i := 0; i < scrapeCount; i++ {
|
for range scrapeCount {
|
||||||
app := b.storage.Appender(context.TODO())
|
app := b.storage.Appender(context.TODO())
|
||||||
ts += timeDelta
|
ts += timeDelta
|
||||||
|
|
||||||
|
|
|
@ -22,6 +22,7 @@ import (
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -278,9 +279,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
|
||||||
for k := range alertEvalTimesMap {
|
for k := range alertEvalTimesMap {
|
||||||
alertEvalTimes = append(alertEvalTimes, k)
|
alertEvalTimes = append(alertEvalTimes, k)
|
||||||
}
|
}
|
||||||
sort.Slice(alertEvalTimes, func(i, j int) bool {
|
slices.Sort(alertEvalTimes)
|
||||||
return alertEvalTimes[i] < alertEvalTimes[j]
|
|
||||||
})
|
|
||||||
|
|
||||||
// Current index in alertEvalTimes what we are looking at.
|
// Current index in alertEvalTimes what we are looking at.
|
||||||
curr := 0
|
curr := 0
|
||||||
|
|
|
@ -367,7 +367,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
|
// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
|
||||||
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultConfig
|
*c = DefaultConfig
|
||||||
// We want to set c to the defaults and then overwrite it with the input.
|
// We want to set c to the defaults and then overwrite it with the input.
|
||||||
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
|
// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
|
||||||
|
@ -594,7 +594,7 @@ func (c *GlobalConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *GlobalConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
// Create a clean global config as the previous one was already populated
|
// Create a clean global config as the previous one was already populated
|
||||||
// by the default due to the YAML parser behavior for empty blocks.
|
// by the default due to the YAML parser behavior for empty blocks.
|
||||||
gc := &GlobalConfig{}
|
gc := &GlobalConfig{}
|
||||||
|
@ -630,11 +630,7 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
return errors.New("global scrape timeout greater than scrape interval")
|
return errors.New("global scrape timeout greater than scrape interval")
|
||||||
}
|
}
|
||||||
if gc.ScrapeTimeout == 0 {
|
if gc.ScrapeTimeout == 0 {
|
||||||
if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval {
|
gc.ScrapeTimeout = min(DefaultGlobalConfig.ScrapeTimeout, gc.ScrapeInterval)
|
||||||
gc.ScrapeTimeout = gc.ScrapeInterval
|
|
||||||
} else {
|
|
||||||
gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if gc.EvaluationInterval == 0 {
|
if gc.EvaluationInterval == 0 {
|
||||||
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
|
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
|
||||||
|
@ -790,7 +786,7 @@ func (c *ScrapeConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultScrapeConfig
|
*c = DefaultScrapeConfig
|
||||||
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
|
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -841,11 +837,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
|
||||||
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
|
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
|
||||||
}
|
}
|
||||||
if c.ScrapeTimeout == 0 {
|
if c.ScrapeTimeout == 0 {
|
||||||
if globalConfig.ScrapeTimeout > c.ScrapeInterval {
|
c.ScrapeTimeout = min(globalConfig.ScrapeTimeout, c.ScrapeInterval)
|
||||||
c.ScrapeTimeout = c.ScrapeInterval
|
|
||||||
} else {
|
|
||||||
c.ScrapeTimeout = globalConfig.ScrapeTimeout
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if c.BodySizeLimit == 0 {
|
if c.BodySizeLimit == 0 {
|
||||||
c.BodySizeLimit = globalConfig.BodySizeLimit
|
c.BodySizeLimit = globalConfig.BodySizeLimit
|
||||||
|
@ -970,7 +962,7 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the yaml.Marshaler interface.
|
// MarshalYAML implements the yaml.Marshaler interface.
|
||||||
func (c *ScrapeConfig) MarshalYAML() (interface{}, error) {
|
func (c *ScrapeConfig) MarshalYAML() (any, error) {
|
||||||
return discovery.MarshalYAMLWithInlineConfigs(c)
|
return discovery.MarshalYAMLWithInlineConfigs(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1024,7 +1016,7 @@ type TSDBConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (t *TSDBConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*t = TSDBConfig{}
|
*t = TSDBConfig{}
|
||||||
type plain TSDBConfig
|
type plain TSDBConfig
|
||||||
if err := unmarshal((*plain)(t)); err != nil {
|
if err := unmarshal((*plain)(t)); err != nil {
|
||||||
|
@ -1046,7 +1038,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (t *TracingClientType) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (t *TracingClientType) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*t = TracingClientType("")
|
*t = TracingClientType("")
|
||||||
type plain TracingClientType
|
type plain TracingClientType
|
||||||
if err := unmarshal((*plain)(t)); err != nil {
|
if err := unmarshal((*plain)(t)); err != nil {
|
||||||
|
@ -1080,7 +1072,7 @@ func (t *TracingConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (t *TracingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (t *TracingConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*t = TracingConfig{
|
*t = TracingConfig{
|
||||||
ClientType: TracingClientGRPC,
|
ClientType: TracingClientGRPC,
|
||||||
}
|
}
|
||||||
|
@ -1140,7 +1132,7 @@ func (c *AlertingConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *AlertingConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
// Create a clean global config as the previous one was already populated
|
// Create a clean global config as the previous one was already populated
|
||||||
// by the default due to the YAML parser behavior for empty blocks.
|
// by the default due to the YAML parser behavior for empty blocks.
|
||||||
*c = AlertingConfig{}
|
*c = AlertingConfig{}
|
||||||
|
@ -1175,7 +1167,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
|
||||||
type AlertmanagerAPIVersion string
|
type AlertmanagerAPIVersion string
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*v = AlertmanagerAPIVersion("")
|
*v = AlertmanagerAPIVersion("")
|
||||||
type plain AlertmanagerAPIVersion
|
type plain AlertmanagerAPIVersion
|
||||||
if err := unmarshal((*plain)(v)); err != nil {
|
if err := unmarshal((*plain)(v)); err != nil {
|
||||||
|
@ -1234,7 +1226,7 @@ func (c *AlertmanagerConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultAlertmanagerConfig
|
*c = DefaultAlertmanagerConfig
|
||||||
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
|
if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -1291,7 +1283,7 @@ func (c *AlertmanagerConfig) Validate(nameValidationScheme model.ValidationSchem
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the yaml.Marshaler interface.
|
// MarshalYAML implements the yaml.Marshaler interface.
|
||||||
func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) {
|
func (c *AlertmanagerConfig) MarshalYAML() (any, error) {
|
||||||
return discovery.MarshalYAMLWithInlineConfigs(c)
|
return discovery.MarshalYAMLWithInlineConfigs(c)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1395,7 +1387,7 @@ func (c *RemoteWriteConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultRemoteWriteConfig
|
*c = DefaultRemoteWriteConfig
|
||||||
type plain RemoteWriteConfig
|
type plain RemoteWriteConfig
|
||||||
if err := unmarshal((*plain)(c)); err != nil {
|
if err := unmarshal((*plain)(c)); err != nil {
|
||||||
|
@ -1560,7 +1552,7 @@ func (c *RemoteReadConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultRemoteReadConfig
|
*c = DefaultRemoteReadConfig
|
||||||
type plain RemoteReadConfig
|
type plain RemoteReadConfig
|
||||||
if err := unmarshal((*plain)(c)); err != nil {
|
if err := unmarshal((*plain)(c)); err != nil {
|
||||||
|
@ -1620,7 +1612,7 @@ type OTLPConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *OTLPConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultOTLPConfig
|
*c = DefaultOTLPConfig
|
||||||
type plain OTLPConfig
|
type plain OTLPConfig
|
||||||
if err := unmarshal((*plain)(c)); err != nil {
|
if err := unmarshal((*plain)(c)); err != nil {
|
||||||
|
|
|
@ -116,7 +116,7 @@ func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
|
||||||
func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultEC2SDConfig
|
*c = DefaultEC2SDConfig
|
||||||
type plain EC2SDConfig
|
type plain EC2SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -98,7 +98,7 @@ func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (dis
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
|
||||||
func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultLightsailSDConfig
|
*c = DefaultLightsailSDConfig
|
||||||
type plain LightsailSDConfig
|
type plain LightsailSDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -138,7 +138,7 @@ func validateAuthParam(param, name string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -142,7 +142,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -437,8 +437,8 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestUnmarshalConfig(t *testing.T) {
|
func TestUnmarshalConfig(t *testing.T) {
|
||||||
unmarshal := func(d []byte) func(interface{}) error {
|
unmarshal := func(d []byte) func(any) error {
|
||||||
return func(o interface{}) error {
|
return func(o any) error {
|
||||||
return yaml.Unmarshal(d, o)
|
return yaml.Unmarshal(d, o)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -93,7 +93,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -108,7 +108,7 @@ func (c *Configs) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements yaml.Unmarshaler.
|
// UnmarshalYAML implements yaml.Unmarshaler.
|
||||||
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Configs) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
cfgTyp := reflect.StructOf(configFields)
|
cfgTyp := reflect.StructOf(configFields)
|
||||||
cfgPtr := reflect.New(cfgTyp)
|
cfgPtr := reflect.New(cfgTyp)
|
||||||
cfgVal := cfgPtr.Elem()
|
cfgVal := cfgPtr.Elem()
|
||||||
|
@ -123,7 +123,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements yaml.Marshaler.
|
// MarshalYAML implements yaml.Marshaler.
|
||||||
func (c Configs) MarshalYAML() (interface{}, error) {
|
func (c Configs) MarshalYAML() (any, error) {
|
||||||
cfgTyp := reflect.StructOf(configFields)
|
cfgTyp := reflect.StructOf(configFields)
|
||||||
cfgPtr := reflect.New(cfgTyp)
|
cfgPtr := reflect.New(cfgTyp)
|
||||||
cfgVal := cfgPtr.Elem()
|
cfgVal := cfgPtr.Elem()
|
||||||
|
|
|
@ -82,7 +82,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -251,7 +251,6 @@ func TestDNS(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
tc := tc
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
@ -282,8 +281,8 @@ func TestSDConfigUnmarshalYAML(t *testing.T) {
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
unmarshal := func(d []byte) func(interface{}) error {
|
unmarshal := func(d []byte) func(any) error {
|
||||||
return func(o interface{}) error {
|
return func(o any) error {
|
||||||
return yaml.Unmarshal(d, o)
|
return yaml.Unmarshal(d, o)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -97,7 +97,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -20,6 +20,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"maps"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -78,7 +79,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
@ -120,9 +121,7 @@ func (t *TimestampCollector) Collect(ch chan<- prometheus.Metric) {
|
||||||
t.lock.RLock()
|
t.lock.RLock()
|
||||||
for fileSD := range t.discoverers {
|
for fileSD := range t.discoverers {
|
||||||
fileSD.lock.RLock()
|
fileSD.lock.RLock()
|
||||||
for filename, timestamp := range fileSD.timestamps {
|
maps.Copy(uniqueFiles, fileSD.timestamps)
|
||||||
uniqueFiles[filename] = timestamp
|
|
||||||
}
|
|
||||||
fileSD.lock.RUnlock()
|
fileSD.lock.RUnlock()
|
||||||
}
|
}
|
||||||
t.lock.RUnlock()
|
t.lock.RUnlock()
|
||||||
|
|
|
@ -327,7 +327,6 @@ func TestInitialUpdate(t *testing.T) {
|
||||||
"fixtures/valid.yml",
|
"fixtures/valid.yml",
|
||||||
"fixtures/valid.json",
|
"fixtures/valid.json",
|
||||||
} {
|
} {
|
||||||
tc := tc
|
|
||||||
t.Run(tc, func(t *testing.T) {
|
t.Run(tc, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
@ -348,7 +347,6 @@ func TestInvalidFile(t *testing.T) {
|
||||||
"fixtures/invalid_nil.yml",
|
"fixtures/invalid_nil.yml",
|
||||||
"fixtures/invalid_nil.json",
|
"fixtures/invalid_nil.json",
|
||||||
} {
|
} {
|
||||||
tc := tc
|
|
||||||
t.Run(tc, func(t *testing.T) {
|
t.Run(tc, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
|
|
@ -98,7 +98,7 @@ func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Di
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -99,7 +99,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
if err := unmarshal((*string)(c)); err != nil {
|
if err := unmarshal((*string)(c)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -112,7 +112,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -78,7 +78,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -106,7 +106,7 @@ func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -83,15 +83,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := e.endpointsInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
epAddCount.Inc()
|
epAddCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
epUpdateCount.Inc()
|
epUpdateCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
epDeleteCount.Inc()
|
epDeleteCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
|
@ -100,7 +100,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
|
||||||
l.Error("Error adding endpoints event handler.", "err", err)
|
l.Error("Error adding endpoints event handler.", "err", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
serviceUpdate := func(o interface{}) {
|
serviceUpdate := func(o any) {
|
||||||
svc, err := convertToService(o)
|
svc, err := convertToService(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.logger.Error("converting to Service object failed", "err", err)
|
e.logger.Error("converting to Service object failed", "err", err)
|
||||||
|
@ -119,15 +119,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
|
||||||
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
// TODO(fabxc): potentially remove add and delete event handlers. Those should
|
// TODO(fabxc): potentially remove add and delete event handlers. Those should
|
||||||
// be triggered via the endpoint handlers already.
|
// be triggered via the endpoint handlers already.
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
svcAddCount.Inc()
|
svcAddCount.Inc()
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
svcUpdateCount.Inc()
|
svcUpdateCount.Inc()
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
svcDeleteCount.Inc()
|
svcDeleteCount.Inc()
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
|
@ -136,7 +136,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
|
||||||
l.Error("Error adding services event handler.", "err", err)
|
l.Error("Error adding services event handler.", "err", err)
|
||||||
}
|
}
|
||||||
_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
UpdateFunc: func(old, cur interface{}) {
|
UpdateFunc: func(old, cur any) {
|
||||||
podUpdateCount.Inc()
|
podUpdateCount.Inc()
|
||||||
oldPod, ok := old.(*apiv1.Pod)
|
oldPod, ok := old.(*apiv1.Pod)
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -160,15 +160,15 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
|
||||||
}
|
}
|
||||||
if e.withNodeMetadata {
|
if e.withNodeMetadata {
|
||||||
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
e.enqueueNode(node.Name)
|
e.enqueueNode(node.Name)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
e.enqueueNode(node.Name)
|
e.enqueueNode(node.Name)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
nodeName, err := nodeName(o)
|
nodeName, err := nodeName(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Error("Error getting Node name", "err", err)
|
l.Error("Error getting Node name", "err", err)
|
||||||
|
@ -183,7 +183,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node,
|
||||||
|
|
||||||
if e.withNamespaceMetadata {
|
if e.withNamespaceMetadata {
|
||||||
_, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
namespace := o.(*apiv1.Namespace)
|
namespace := o.(*apiv1.Namespace)
|
||||||
e.enqueueNamespace(namespace.Name)
|
e.enqueueNamespace(namespace.Name)
|
||||||
},
|
},
|
||||||
|
@ -234,7 +234,7 @@ func (e *Endpoints) enqueuePod(podNamespacedName string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Endpoints) enqueue(obj interface{}) {
|
func (e *Endpoints) enqueue(obj any) {
|
||||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
@ -303,7 +303,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertToEndpoints(o interface{}) (*apiv1.Endpoints, error) {
|
func convertToEndpoints(o any) (*apiv1.Endpoints, error) {
|
||||||
endpoints, ok := o.(*apiv1.Endpoints)
|
endpoints, ok := o.(*apiv1.Endpoints)
|
||||||
if ok {
|
if ok {
|
||||||
return endpoints, nil
|
return endpoints, nil
|
||||||
|
|
|
@ -83,15 +83,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := e.endpointSliceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
epslAddCount.Inc()
|
epslAddCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
epslUpdateCount.Inc()
|
epslUpdateCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
epslDeleteCount.Inc()
|
epslDeleteCount.Inc()
|
||||||
e.enqueue(o)
|
e.enqueue(o)
|
||||||
},
|
},
|
||||||
|
@ -100,7 +100,7 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
|
||||||
l.Error("Error adding endpoint slices event handler.", "err", err)
|
l.Error("Error adding endpoint slices event handler.", "err", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
serviceUpdate := func(o interface{}) {
|
serviceUpdate := func(o any) {
|
||||||
svc, err := convertToService(o)
|
svc, err := convertToService(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.logger.Error("converting to Service object failed", "err", err)
|
e.logger.Error("converting to Service object failed", "err", err)
|
||||||
|
@ -118,15 +118,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
svcAddCount.Inc()
|
svcAddCount.Inc()
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
svcUpdateCount.Inc()
|
svcUpdateCount.Inc()
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
svcDeleteCount.Inc()
|
svcDeleteCount.Inc()
|
||||||
serviceUpdate(o)
|
serviceUpdate(o)
|
||||||
},
|
},
|
||||||
|
@ -137,15 +137,15 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
|
||||||
|
|
||||||
if e.withNodeMetadata {
|
if e.withNodeMetadata {
|
||||||
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
e.enqueueNode(node.Name)
|
e.enqueueNode(node.Name)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
e.enqueueNode(node.Name)
|
e.enqueueNode(node.Name)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
nodeName, err := nodeName(o)
|
nodeName, err := nodeName(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Error("Error getting Node name", "err", err)
|
l.Error("Error getting Node name", "err", err)
|
||||||
|
@ -160,7 +160,7 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
|
||||||
|
|
||||||
if e.withNamespaceMetadata {
|
if e.withNamespaceMetadata {
|
||||||
_, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = e.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
namespace := o.(*apiv1.Namespace)
|
namespace := o.(*apiv1.Namespace)
|
||||||
e.enqueueNamespace(namespace.Name)
|
e.enqueueNamespace(namespace.Name)
|
||||||
},
|
},
|
||||||
|
@ -199,7 +199,7 @@ func (e *EndpointSlice) enqueueNamespace(namespace string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *EndpointSlice) enqueue(obj interface{}) {
|
func (e *EndpointSlice) enqueue(obj any) {
|
||||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
|
|
@ -56,15 +56,15 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
ingressAddCount.Inc()
|
ingressAddCount.Inc()
|
||||||
s.enqueue(o)
|
s.enqueue(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
ingressDeleteCount.Inc()
|
ingressDeleteCount.Inc()
|
||||||
s.enqueue(o)
|
s.enqueue(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
ingressUpdateCount.Inc()
|
ingressUpdateCount.Inc()
|
||||||
s.enqueue(o)
|
s.enqueue(o)
|
||||||
},
|
},
|
||||||
|
@ -75,7 +75,7 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
|
||||||
|
|
||||||
if s.withNamespaceMetadata {
|
if s.withNamespaceMetadata {
|
||||||
_, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
namespace := o.(*apiv1.Namespace)
|
namespace := o.(*apiv1.Namespace)
|
||||||
s.enqueueNamespace(namespace.Name)
|
s.enqueueNamespace(namespace.Name)
|
||||||
},
|
},
|
||||||
|
@ -90,7 +90,7 @@ func NewIngress(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *Ingress) enqueue(obj interface{}) {
|
func (i *Ingress) enqueue(obj any) {
|
||||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
|
|
@ -16,6 +16,7 @@ package kubernetes
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
@ -193,9 +194,7 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
|
||||||
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
|
n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
|
||||||
|
|
||||||
expected := expectedTargetGroups("ns1", TLSNo)
|
expected := expectedTargetGroups("ns1", TLSNo)
|
||||||
for k, v := range expectedTargetGroups("ns2", TLSNo) {
|
maps.Copy(expected, expectedTargetGroups("ns2", TLSNo))
|
||||||
expected[k] = v
|
|
||||||
}
|
|
||||||
k8sDiscoveryTest{
|
k8sDiscoveryTest{
|
||||||
discovery: n,
|
discovery: n,
|
||||||
afterStart: func() {
|
afterStart: func() {
|
||||||
|
|
|
@ -80,7 +80,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
if err := unmarshal((*string)(c)); err != nil {
|
if err := unmarshal((*string)(c)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -160,7 +160,7 @@ type AttachMetadataConfig struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
@ -234,7 +234,7 @@ type NamespaceDiscovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *NamespaceDiscovery) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = NamespaceDiscovery{}
|
*c = NamespaceDiscovery{}
|
||||||
type plain NamespaceDiscovery
|
type plain NamespaceDiscovery
|
||||||
return unmarshal((*plain)(c))
|
return unmarshal((*plain)(c))
|
||||||
|
@ -698,7 +698,7 @@ func (d *Discovery) newNamespaceInformer(ctx context.Context) cache.SharedInform
|
||||||
func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
|
func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
|
||||||
indexers := make(map[string]cache.IndexFunc)
|
indexers := make(map[string]cache.IndexFunc)
|
||||||
if d.attachMetadata.Node {
|
if d.attachMetadata.Node {
|
||||||
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
|
indexers[nodeIndex] = func(obj any) ([]string, error) {
|
||||||
pod, ok := obj.(*apiv1.Pod)
|
pod, ok := obj.(*apiv1.Pod)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("object is not a pod")
|
return nil, errors.New("object is not a pod")
|
||||||
|
@ -716,7 +716,7 @@ func (d *Discovery) newIndexedPodsInformer(plw *cache.ListWatch) cache.SharedInd
|
||||||
|
|
||||||
func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
|
func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
|
||||||
indexers := make(map[string]cache.IndexFunc)
|
indexers := make(map[string]cache.IndexFunc)
|
||||||
indexers[podIndex] = func(obj interface{}) ([]string, error) {
|
indexers[podIndex] = func(obj any) ([]string, error) {
|
||||||
e, ok := obj.(*apiv1.Endpoints)
|
e, ok := obj.(*apiv1.Endpoints)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("object is not endpoints")
|
return nil, errors.New("object is not endpoints")
|
||||||
|
@ -733,7 +733,7 @@ func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.Shar
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.attachMetadata.Node {
|
if d.attachMetadata.Node {
|
||||||
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
|
indexers[nodeIndex] = func(obj any) ([]string, error) {
|
||||||
e, ok := obj.(*apiv1.Endpoints)
|
e, ok := obj.(*apiv1.Endpoints)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("object is not endpoints")
|
return nil, errors.New("object is not endpoints")
|
||||||
|
@ -766,7 +766,7 @@ func (d *Discovery) newIndexedEndpointsInformer(plw *cache.ListWatch) cache.Shar
|
||||||
|
|
||||||
func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
|
func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
|
||||||
indexers := make(map[string]cache.IndexFunc)
|
indexers := make(map[string]cache.IndexFunc)
|
||||||
indexers[serviceIndex] = func(obj interface{}) ([]string, error) {
|
indexers[serviceIndex] = func(obj any) ([]string, error) {
|
||||||
e, ok := obj.(*disv1.EndpointSlice)
|
e, ok := obj.(*disv1.EndpointSlice)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("object is not an endpointslice")
|
return nil, errors.New("object is not an endpointslice")
|
||||||
|
@ -781,7 +781,7 @@ func (d *Discovery) newIndexedEndpointSlicesInformer(plw *cache.ListWatch, objec
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.attachMetadata.Node {
|
if d.attachMetadata.Node {
|
||||||
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
|
indexers[nodeIndex] = func(obj any) ([]string, error) {
|
||||||
e, ok := obj.(*disv1.EndpointSlice)
|
e, ok := obj.(*disv1.EndpointSlice)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, errors.New("object is not an endpointslice")
|
return nil, errors.New("object is not an endpointslice")
|
||||||
|
@ -886,7 +886,7 @@ func namespacedName(namespace, name string) string {
|
||||||
|
|
||||||
// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
|
// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
|
||||||
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
|
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
|
||||||
func nodeName(o interface{}) (string, error) {
|
func nodeName(o any) (string, error) {
|
||||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
|
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
|
|
@ -302,7 +302,6 @@ func TestFailuresCountMetric(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range tests {
|
for _, tc := range tests {
|
||||||
tc := tc
|
|
||||||
t.Run(string(tc.role), func(t *testing.T) {
|
t.Run(string(tc.role), func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
|
|
@ -62,15 +62,15 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := n.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
nodeAddCount.Inc()
|
nodeAddCount.Inc()
|
||||||
n.enqueue(o)
|
n.enqueue(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
nodeDeleteCount.Inc()
|
nodeDeleteCount.Inc()
|
||||||
n.enqueue(o)
|
n.enqueue(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
nodeUpdateCount.Inc()
|
nodeUpdateCount.Inc()
|
||||||
n.enqueue(o)
|
n.enqueue(o)
|
||||||
},
|
},
|
||||||
|
@ -81,7 +81,7 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *Node) enqueue(obj interface{}) {
|
func (n *Node) enqueue(obj any) {
|
||||||
key, err := nodeName(obj)
|
key, err := nodeName(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
@ -140,7 +140,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertToNode(o interface{}) (*apiv1.Node, error) {
|
func convertToNode(o any) (*apiv1.Node, error) {
|
||||||
node, ok := o.(*apiv1.Node)
|
node, ok := o.(*apiv1.Node)
|
||||||
if ok {
|
if ok {
|
||||||
return node, nil
|
return node, nil
|
||||||
|
|
|
@ -71,15 +71,15 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
|
||||||
queue: workqueue.NewNamed(RolePod.String()),
|
queue: workqueue.NewNamed(RolePod.String()),
|
||||||
}
|
}
|
||||||
_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := p.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
podAddCount.Inc()
|
podAddCount.Inc()
|
||||||
p.enqueue(o)
|
p.enqueue(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
podDeleteCount.Inc()
|
podDeleteCount.Inc()
|
||||||
p.enqueue(o)
|
p.enqueue(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
podUpdateCount.Inc()
|
podUpdateCount.Inc()
|
||||||
p.enqueue(o)
|
p.enqueue(o)
|
||||||
},
|
},
|
||||||
|
@ -90,15 +90,15 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
|
||||||
|
|
||||||
if p.withNodeMetadata {
|
if p.withNodeMetadata {
|
||||||
_, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = p.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
p.enqueuePodsForNode(node.Name)
|
p.enqueuePodsForNode(node.Name)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
node := o.(*apiv1.Node)
|
node := o.(*apiv1.Node)
|
||||||
p.enqueuePodsForNode(node.Name)
|
p.enqueuePodsForNode(node.Name)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
nodeName, err := nodeName(o)
|
nodeName, err := nodeName(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Error("Error getting Node name", "err", err)
|
l.Error("Error getting Node name", "err", err)
|
||||||
|
@ -113,7 +113,7 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
|
||||||
|
|
||||||
if p.withNamespaceMetadata {
|
if p.withNamespaceMetadata {
|
||||||
_, err = p.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = p.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
namespace := o.(*apiv1.Namespace)
|
namespace := o.(*apiv1.Namespace)
|
||||||
p.enqueuePodsForNamespace(namespace.Name)
|
p.enqueuePodsForNamespace(namespace.Name)
|
||||||
},
|
},
|
||||||
|
@ -128,7 +128,7 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes, namespace cac
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Pod) enqueue(obj interface{}) {
|
func (p *Pod) enqueue(obj any) {
|
||||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
@ -195,7 +195,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertToPod(o interface{}) (*apiv1.Pod, error) {
|
func convertToPod(o any) (*apiv1.Pod, error) {
|
||||||
pod, ok := o.(*apiv1.Pod)
|
pod, ok := o.(*apiv1.Pod)
|
||||||
if ok {
|
if ok {
|
||||||
return pod, nil
|
return pod, nil
|
||||||
|
|
|
@ -16,6 +16,7 @@ package kubernetes
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
@ -437,9 +438,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) {
|
||||||
n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
|
n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})
|
||||||
|
|
||||||
expected := expectedPodTargetGroups("ns1")
|
expected := expectedPodTargetGroups("ns1")
|
||||||
for k, v := range expectedPodTargetGroups("ns2") {
|
maps.Copy(expected, expectedPodTargetGroups("ns2"))
|
||||||
expected[k] = v
|
|
||||||
}
|
|
||||||
k8sDiscoveryTest{
|
k8sDiscoveryTest{
|
||||||
discovery: n,
|
discovery: n,
|
||||||
beforeRun: func() {
|
beforeRun: func() {
|
||||||
|
|
|
@ -61,15 +61,15 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err := s.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
AddFunc: func(o interface{}) {
|
AddFunc: func(o any) {
|
||||||
svcAddCount.Inc()
|
svcAddCount.Inc()
|
||||||
s.enqueue(o)
|
s.enqueue(o)
|
||||||
},
|
},
|
||||||
DeleteFunc: func(o interface{}) {
|
DeleteFunc: func(o any) {
|
||||||
svcDeleteCount.Inc()
|
svcDeleteCount.Inc()
|
||||||
s.enqueue(o)
|
s.enqueue(o)
|
||||||
},
|
},
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
svcUpdateCount.Inc()
|
svcUpdateCount.Inc()
|
||||||
s.enqueue(o)
|
s.enqueue(o)
|
||||||
},
|
},
|
||||||
|
@ -80,7 +80,7 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
|
||||||
|
|
||||||
if s.withNamespaceMetadata {
|
if s.withNamespaceMetadata {
|
||||||
_, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
_, err = s.namespaceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||||
UpdateFunc: func(_, o interface{}) {
|
UpdateFunc: func(_, o any) {
|
||||||
namespace := o.(*apiv1.Namespace)
|
namespace := o.(*apiv1.Namespace)
|
||||||
s.enqueueNamespace(namespace.Name)
|
s.enqueueNamespace(namespace.Name)
|
||||||
},
|
},
|
||||||
|
@ -95,7 +95,7 @@ func NewService(l *slog.Logger, inf cache.SharedIndexInformer, namespace cache.S
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) enqueue(obj interface{}) {
|
func (s *Service) enqueue(obj any) {
|
||||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
|
@ -171,7 +171,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func convertToService(o interface{}) (*apiv1.Service, error) {
|
func convertToService(o any) (*apiv1.Service, error) {
|
||||||
service, ok := o.(*apiv1.Service)
|
service, ok := o.(*apiv1.Service)
|
||||||
if ok {
|
if ok {
|
||||||
return service, nil
|
return service, nil
|
||||||
|
|
|
@ -112,7 +112,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"maps"
|
||||||
"reflect"
|
"reflect"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
@ -37,7 +38,7 @@ type poolKey struct {
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
name string
|
name string
|
||||||
d Discoverer
|
d Discoverer
|
||||||
config interface{}
|
config any
|
||||||
|
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
// done should be called after cleaning up resources associated with cancelled provider.
|
// done should be called after cleaning up resources associated with cancelled provider.
|
||||||
|
@ -62,7 +63,7 @@ func (p *Provider) IsStarted() bool {
|
||||||
return p.cancel != nil
|
return p.cancel != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Provider) Config() interface{} {
|
func (p *Provider) Config() any {
|
||||||
return p.config
|
return p.config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -255,9 +256,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
|
||||||
}
|
}
|
||||||
if l := len(refTargets); l > 0 {
|
if l := len(refTargets); l > 0 {
|
||||||
m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
|
m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l)
|
||||||
for k, v := range refTargets {
|
maps.Copy(m.targets[poolKey{s, prov.name}], refTargets)
|
||||||
m.targets[poolKey{s, prov.name}][k] = v
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
m.targetsMtx.Unlock()
|
m.targetsMtx.Unlock()
|
||||||
|
|
|
@ -668,7 +668,6 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, tc := range testCases {
|
for i, tc := range testCases {
|
||||||
tc := tc
|
|
||||||
t.Run(tc.title, func(t *testing.T) {
|
t.Run(tc.title, func(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -1350,7 +1349,6 @@ func TestCoordinationWithReceiver(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
tc := tc
|
|
||||||
t.Run(tc.title, func(t *testing.T) {
|
t.Run(tc.title, func(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -1471,7 +1469,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
|
||||||
wg.Add(2000)
|
wg.Add(2000)
|
||||||
|
|
||||||
start := make(chan struct{})
|
start := make(chan struct{})
|
||||||
for i := 0; i < 1000; i++ {
|
for range 1000 {
|
||||||
go func() {
|
go func() {
|
||||||
<-start
|
<-start
|
||||||
td.update([]*targetgroup.Group{
|
td.update([]*targetgroup.Group{
|
||||||
|
@ -1485,7 +1483,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < 1000; i++ {
|
for i := range 1000 {
|
||||||
go func(i int) {
|
go func(i int) {
|
||||||
<-start
|
<-start
|
||||||
c := map[string]Configs{
|
c := map[string]Configs{
|
||||||
|
@ -1545,7 +1543,7 @@ func (t *testDiscoverer) update(tgs []*targetgroup.Group) {
|
||||||
func TestUnregisterMetrics(t *testing.T) {
|
func TestUnregisterMetrics(t *testing.T) {
|
||||||
reg := prometheus.NewRegistry()
|
reg := prometheus.NewRegistry()
|
||||||
// Check that all metrics can be unregistered, allowing a second manager to be created.
|
// Check that all metrics can be unregistered, allowing a second manager to be created.
|
||||||
for i := 0; i < 2; i++ {
|
for range 2 {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
|
refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
|
||||||
|
|
|
@ -101,7 +101,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
@ -513,7 +513,7 @@ func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32
|
||||||
ports := make([]uint32, len(portMappings))
|
ports := make([]uint32, len(portMappings))
|
||||||
labels := make([]map[string]string, len(portMappings))
|
labels := make([]map[string]string, len(portMappings))
|
||||||
|
|
||||||
for i := 0; i < len(portMappings); i++ {
|
for i := range portMappings {
|
||||||
labels[i] = portMappings[i].Labels
|
labels[i] = portMappings[i].Labels
|
||||||
|
|
||||||
if containerNet {
|
if containerNet {
|
||||||
|
|
|
@ -103,7 +103,7 @@ func (c *DockerSDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultDockerSDConfig
|
*c = DefaultDockerSDConfig
|
||||||
type plain DockerSDConfig
|
type plain DockerSDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -90,7 +90,7 @@ func (c *DockerSwarmSDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultDockerSwarmSDConfig
|
*c = DefaultDockerSwarmSDConfig
|
||||||
type plain DockerSwarmSDConfig
|
type plain DockerSwarmSDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -16,6 +16,7 @@ package moby
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
@ -81,13 +82,9 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range serviceLabels[s.ServiceID] {
|
maps.Copy(commonLabels, serviceLabels[s.ServiceID])
|
||||||
commonLabels[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range nodeLabels[s.NodeID] {
|
maps.Copy(commonLabels, nodeLabels[s.NodeID])
|
||||||
commonLabels[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range s.Status.PortStatus.Ports {
|
for _, p := range s.Status.PortStatus.Ports {
|
||||||
if p.Protocol != swarm.PortConfigProtocolTCP {
|
if p.Protocol != swarm.PortConfigProtocolTCP {
|
||||||
|
|
|
@ -93,7 +93,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"maps"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
@ -206,7 +207,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
||||||
labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v)
|
labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v)
|
||||||
}
|
}
|
||||||
for pool, address := range s.Addresses {
|
for pool, address := range s.Addresses {
|
||||||
md, ok := address.([]interface{})
|
md, ok := address.([]any)
|
||||||
if !ok {
|
if !ok {
|
||||||
i.logger.Warn("Invalid type for address, expected array")
|
i.logger.Warn("Invalid type for address, expected array")
|
||||||
continue
|
continue
|
||||||
|
@ -216,7 +217,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, address := range md {
|
for _, address := range md {
|
||||||
md1, ok := address.(map[string]interface{})
|
md1, ok := address.(map[string]any)
|
||||||
if !ok {
|
if !ok {
|
||||||
i.logger.Warn("Invalid type for address, expected dict")
|
i.logger.Warn("Invalid type for address, expected dict")
|
||||||
continue
|
continue
|
||||||
|
@ -230,9 +231,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
lbls := make(model.LabelSet, len(labels))
|
lbls := make(model.LabelSet, len(labels))
|
||||||
for k, v := range labels {
|
maps.Copy(lbls, labels)
|
||||||
lbls[k] = v
|
|
||||||
}
|
|
||||||
lbls[openstackLabelAddressPool] = model.LabelValue(pool)
|
lbls[openstackLabelAddressPool] = model.LabelValue(pool)
|
||||||
lbls[openstackLabelPrivateIP] = model.LabelValue(addr)
|
lbls[openstackLabelPrivateIP] = model.LabelValue(addr)
|
||||||
if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok {
|
if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok {
|
||||||
|
|
|
@ -103,7 +103,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Role) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
if err := unmarshal((*string)(c)); err != nil {
|
if err := unmarshal((*string)(c)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -116,7 +116,7 @@ func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -66,7 +66,7 @@ func (SDConfig) Name() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -102,7 +102,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -34,7 +34,7 @@ type Resource struct {
|
||||||
Parameters Parameters `json:"parameters"`
|
Parameters Parameters `json:"parameters"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Parameters map[string]interface{}
|
type Parameters map[string]any
|
||||||
|
|
||||||
func (p *Parameters) toLabels() model.LabelSet {
|
func (p *Parameters) toLabels() model.LabelSet {
|
||||||
labels := model.LabelSet{}
|
labels := model.LabelSet{}
|
||||||
|
@ -52,7 +52,7 @@ func (p *Parameters) toLabels() model.LabelSet {
|
||||||
labelValue = strconv.FormatFloat(value, 'g', -1, 64)
|
labelValue = strconv.FormatFloat(value, 'g', -1, 64)
|
||||||
case []string:
|
case []string:
|
||||||
labelValue = separator + strings.Join(value, separator) + separator
|
labelValue = separator + strings.Join(value, separator) + separator
|
||||||
case []interface{}:
|
case []any:
|
||||||
if len(value) == 0 {
|
if len(value) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -72,7 +72,7 @@ func (p *Parameters) toLabels() model.LabelSet {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
labelValue = strings.Join(values, separator)
|
labelValue = strings.Join(values, separator)
|
||||||
case map[string]interface{}:
|
case map[string]any:
|
||||||
subParameter := Parameters(value)
|
subParameter := Parameters(value)
|
||||||
prefix := strutil.SanitizeLabelName(k + "_")
|
prefix := strutil.SanitizeLabelName(k + "_")
|
||||||
for subk, subv := range subParameter.toLabels() {
|
for subk, subv := range subParameter.toLabels() {
|
||||||
|
|
|
@ -110,7 +110,7 @@ func getConfigType(out reflect.Type) reflect.Type {
|
||||||
|
|
||||||
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
|
// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs
|
||||||
// that have a Configs field that should be inlined.
|
// that have a Configs field that should be inlined.
|
||||||
func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error {
|
func UnmarshalYAMLWithInlineConfigs(out any, unmarshal func(any) error) error {
|
||||||
outVal := reflect.ValueOf(out)
|
outVal := reflect.ValueOf(out)
|
||||||
if outVal.Kind() != reflect.Ptr {
|
if outVal.Kind() != reflect.Ptr {
|
||||||
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
|
return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out)
|
||||||
|
@ -198,7 +198,7 @@ func readConfigs(structVal reflect.Value, startField int) (Configs, error) {
|
||||||
|
|
||||||
// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
|
// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs
|
||||||
// that have a Configs field that should be inlined.
|
// that have a Configs field that should be inlined.
|
||||||
func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
|
func MarshalYAMLWithInlineConfigs(in any) (any, error) {
|
||||||
inVal := reflect.ValueOf(in)
|
inVal := reflect.ValueOf(in)
|
||||||
for inVal.Kind() == reflect.Ptr {
|
for inVal.Kind() == reflect.Ptr {
|
||||||
inVal = inVal.Elem()
|
inVal = inVal.Elem()
|
||||||
|
|
|
@ -55,7 +55,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *role) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
if err := unmarshal((*string)(c)); err != nil {
|
if err := unmarshal((*string)(c)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -125,7 +125,7 @@ func (c SDConfig) secretKeyForConfig() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -95,7 +95,7 @@ type refresher interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -20,14 +20,14 @@ type ServerListResponse struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
type Server struct {
|
type Server struct {
|
||||||
AvailabilityZone string `json:"availabilityZone"`
|
AvailabilityZone string `json:"availabilityZone"`
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
Labels map[string]interface{} `json:"labels"`
|
Labels map[string]any `json:"labels"`
|
||||||
MachineType string `json:"machineType"`
|
MachineType string `json:"machineType"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Nics []ServerNetwork `json:"nics"`
|
Nics []ServerNetwork `json:"nics"`
|
||||||
PowerStatus string `json:"powerStatus"`
|
PowerStatus string `json:"powerStatus"`
|
||||||
Status string `json:"status"`
|
Status string `json:"status"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServerNetwork Describes the object that matches servers to its networks.
|
// ServerNetwork Describes the object that matches servers to its networks.
|
||||||
|
|
|
@ -37,7 +37,7 @@ func (tg Group) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (tg *Group) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
g := struct {
|
g := struct {
|
||||||
Targets []string `yaml:"targets"`
|
Targets []string `yaml:"targets"`
|
||||||
Labels model.LabelSet `yaml:"labels"`
|
Labels model.LabelSet `yaml:"labels"`
|
||||||
|
@ -56,7 +56,7 @@ func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the yaml.Marshaler interface.
|
// MarshalYAML implements the yaml.Marshaler interface.
|
||||||
func (tg Group) MarshalYAML() (interface{}, error) {
|
func (tg Group) MarshalYAML() (any, error) {
|
||||||
g := &struct {
|
g := &struct {
|
||||||
Targets []string `yaml:"targets"`
|
Targets []string `yaml:"targets"`
|
||||||
Labels model.LabelSet `yaml:"labels,omitempty"`
|
Labels model.LabelSet `yaml:"labels,omitempty"`
|
||||||
|
|
|
@ -93,7 +93,7 @@ func TestTargetGroupJSONMarshal(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTargetGroupYamlMarshal(t *testing.T) {
|
func TestTargetGroupYamlMarshal(t *testing.T) {
|
||||||
marshal := func(g interface{}) []byte {
|
marshal := func(g any) []byte {
|
||||||
d, err := yaml.Marshal(g)
|
d, err := yaml.Marshal(g)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
@ -134,8 +134,8 @@ func TestTargetGroupYamlMarshal(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTargetGroupYamlUnmarshal(t *testing.T) {
|
func TestTargetGroupYamlUnmarshal(t *testing.T) {
|
||||||
unmarshal := func(d []byte) func(interface{}) error {
|
unmarshal := func(d []byte) func(any) error {
|
||||||
return func(o interface{}) error {
|
return func(o any) error {
|
||||||
return yaml.Unmarshal(d, o)
|
return yaml.Unmarshal(d, o)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -91,7 +91,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -133,7 +133,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
@ -164,7 +164,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
|
||||||
func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) {
|
func login(rpcclient *xmlrpc.Client, user, pass string, duration int) (string, error) {
|
||||||
var result string
|
var result string
|
||||||
err := rpcclient.Call("auth.login", []interface{}{user, pass, duration}, &result)
|
err := rpcclient.Call("auth.login", []any{user, pass, duration}, &result)
|
||||||
return result, err
|
return result, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -174,7 +174,7 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, enti
|
||||||
SystemGroups []systemGroupID `xmlrpc:"system_groups"`
|
SystemGroups []systemGroupID `xmlrpc:"system_groups"`
|
||||||
}
|
}
|
||||||
|
|
||||||
err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []interface{}{token, entitlement}, &systemGroupsInfos)
|
err := rpcclient.Call("system.listSystemGroupsForSystemsWithEntitlement", []any{token, entitlement}, &systemGroupsInfos)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -188,7 +188,7 @@ func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, enti
|
||||||
|
|
||||||
func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) {
|
func getNetworkInformationForSystems(rpcclient *xmlrpc.Client, token string, systemIDs []int) (map[int]networkInfo, error) {
|
||||||
var networkInfos []networkInfo
|
var networkInfos []networkInfo
|
||||||
err := rpcclient.Call("system.getNetworkForSystems", []interface{}{token, systemIDs}, &networkInfos)
|
err := rpcclient.Call("system.getNetworkForSystems", []any{token, systemIDs}, &networkInfos)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -208,7 +208,7 @@ func getEndpointInfoForSystems(
|
||||||
var endpointInfos []endpointInfo
|
var endpointInfos []endpointInfo
|
||||||
err := rpcclient.Call(
|
err := rpcclient.Call(
|
||||||
"system.monitoring.listEndpoints",
|
"system.monitoring.listEndpoints",
|
||||||
[]interface{}{token, systemIDs}, &endpointInfos)
|
[]any{token, systemIDs}, &endpointInfos)
|
||||||
return endpointInfos, err
|
return endpointInfos, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -95,7 +95,7 @@ func (c *SDConfig) SetDirectory(dir string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultSDConfig
|
*c = DefaultSDConfig
|
||||||
type plain SDConfig
|
type plain SDConfig
|
||||||
if err := unmarshal((*plain)(c)); err != nil {
|
if err := unmarshal((*plain)(c)); err != nil {
|
||||||
|
|
|
@ -65,7 +65,7 @@ func (*KumaSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discove
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *KumaSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultKumaSDConfig
|
*c = DefaultKumaSDConfig
|
||||||
type plainKumaConf KumaSDConfig
|
type plainKumaConf KumaSDConfig
|
||||||
err := unmarshal((*plainKumaConf)(c))
|
err := unmarshal((*plainKumaConf)(c))
|
||||||
|
|
|
@ -72,7 +72,7 @@ func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (dis
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultServersetSDConfig
|
*c = DefaultServersetSDConfig
|
||||||
type plain ServersetSDConfig
|
type plain ServersetSDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
@ -114,7 +114,7 @@ func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discove
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultNerveSDConfig
|
*c = DefaultNerveSDConfig
|
||||||
type plain NerveSDConfig
|
type plain NerveSDConfig
|
||||||
err := unmarshal((*plain)(c))
|
err := unmarshal((*plain)(c))
|
||||||
|
|
|
@ -3270,7 +3270,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) {
|
||||||
|
|
||||||
it = c.h.AllReverseBucketIterator()
|
it = c.h.AllReverseBucketIterator()
|
||||||
length := len(c.expPositiveBuckets)
|
length := len(c.expPositiveBuckets)
|
||||||
for j := 0; j < length; j++ {
|
for j := range length {
|
||||||
i := length - j - 1
|
i := length - j - 1
|
||||||
b := c.expPositiveBuckets[i]
|
b := c.expPositiveBuckets[i]
|
||||||
require.True(t, it.Next(), "all reverse bucket iterator exhausted too early")
|
require.True(t, it.Next(), "all reverse bucket iterator exhausted too early")
|
||||||
|
@ -3286,7 +3286,7 @@ func TestFloatCustomBucketsIterators(t *testing.T) {
|
||||||
require.False(t, it.Next(), "positive bucket iterator not exhausted")
|
require.False(t, it.Next(), "positive bucket iterator not exhausted")
|
||||||
|
|
||||||
it = c.h.PositiveReverseBucketIterator()
|
it = c.h.PositiveReverseBucketIterator()
|
||||||
for j := 0; j < length; j++ {
|
for j := range length {
|
||||||
i := length - j - 1
|
i := length - j - 1
|
||||||
b := c.expPositiveBuckets[i]
|
b := c.expPositiveBuckets[i]
|
||||||
require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early")
|
require.True(t, it.Next(), "positive reverse bucket iterator exhausted too early")
|
||||||
|
|
|
@ -402,7 +402,7 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
|
||||||
}
|
}
|
||||||
|
|
||||||
var last IBC
|
var last IBC
|
||||||
for i := 0; i < len(buckets); i++ {
|
for i := range buckets {
|
||||||
var c IBC
|
var c IBC
|
||||||
if deltas {
|
if deltas {
|
||||||
c = last + buckets[i]
|
c = last + buckets[i]
|
||||||
|
|
|
@ -22,7 +22,7 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
|
||||||
observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide))
|
observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide))
|
||||||
|
|
||||||
var histograms []*Histogram
|
var histograms []*Histogram
|
||||||
for i := 0; i < numHistograms; i++ {
|
for i := range numHistograms {
|
||||||
h := &Histogram{
|
h := &Histogram{
|
||||||
Count: uint64(i) + observationCount,
|
Count: uint64(i) + observationCount,
|
||||||
ZeroCount: uint64(i),
|
ZeroCount: uint64(i),
|
||||||
|
@ -35,13 +35,13 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
|
||||||
PositiveBuckets: make([]int64, bucketsPerSide),
|
PositiveBuckets: make([]int64, bucketsPerSide),
|
||||||
}
|
}
|
||||||
|
|
||||||
for j := 0; j < numSpans; j++ {
|
for j := range numSpans {
|
||||||
s := Span{Offset: 1, Length: spanLength}
|
s := Span{Offset: 1, Length: spanLength}
|
||||||
h.NegativeSpans[j] = s
|
h.NegativeSpans[j] = s
|
||||||
h.PositiveSpans[j] = s
|
h.PositiveSpans[j] = s
|
||||||
}
|
}
|
||||||
|
|
||||||
for j := 0; j < bucketsPerSide; j++ {
|
for j := range bucketsPerSide {
|
||||||
h.NegativeBuckets[j] = 1
|
h.NegativeBuckets[j] = 1
|
||||||
h.PositiveBuckets[j] = 1
|
h.PositiveBuckets[j] = 1
|
||||||
}
|
}
|
||||||
|
|
|
@ -84,12 +84,12 @@ func (ls *Labels) UnmarshalJSON(b []byte) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements yaml.Marshaler.
|
// MarshalYAML implements yaml.Marshaler.
|
||||||
func (ls Labels) MarshalYAML() (interface{}, error) {
|
func (ls Labels) MarshalYAML() (any, error) {
|
||||||
return ls.Map(), nil
|
return ls.Map(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements yaml.Unmarshaler.
|
// UnmarshalYAML implements yaml.Unmarshaler.
|
||||||
func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (ls *Labels) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
var m map[string]string
|
var m map[string]string
|
||||||
|
|
||||||
if err := unmarshal(&m); err != nil {
|
if err := unmarshal(&m); err != nil {
|
||||||
|
|
|
@ -579,7 +579,7 @@ func TestLabels_DropReserved(t *testing.T) {
|
||||||
func ScratchBuilderForBenchmark() ScratchBuilder {
|
func ScratchBuilderForBenchmark() ScratchBuilder {
|
||||||
// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
|
// (Only relevant to -tags dedupelabels: stuff the symbol table before adding the real labels, to avoid having everything fitting into 1 byte.)
|
||||||
b := NewScratchBuilder(256)
|
b := NewScratchBuilder(256)
|
||||||
for i := 0; i < 256; i++ {
|
for i := range 256 {
|
||||||
b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i))
|
b.Add(fmt.Sprintf("name%d", i), fmt.Sprintf("value%d", i))
|
||||||
}
|
}
|
||||||
b.Labels()
|
b.Labels()
|
||||||
|
@ -625,7 +625,7 @@ func FromStringsForBenchmark(ss ...string) Labels {
|
||||||
func BenchmarkLabels_Get(b *testing.B) {
|
func BenchmarkLabels_Get(b *testing.B) {
|
||||||
maxLabels := 30
|
maxLabels := 30
|
||||||
allLabels := make([]Label, maxLabels)
|
allLabels := make([]Label, maxLabels)
|
||||||
for i := 0; i < maxLabels; i++ {
|
for i := range maxLabels {
|
||||||
allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))}
|
allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5+(i%5))}
|
||||||
}
|
}
|
||||||
for _, size := range []int{5, 10, maxLabels} {
|
for _, size := range []int{5, 10, maxLabels} {
|
||||||
|
@ -906,7 +906,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
|
||||||
name: "typical labels under 1KB",
|
name: "typical labels under 1KB",
|
||||||
lbls: func() Labels {
|
lbls: func() Labels {
|
||||||
b := NewBuilder(EmptyLabels())
|
b := NewBuilder(EmptyLabels())
|
||||||
for i := 0; i < 10; i++ {
|
for i := range 10 {
|
||||||
// Label ~20B name, 50B value.
|
// Label ~20B name, 50B value.
|
||||||
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
|
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
|
||||||
}
|
}
|
||||||
|
@ -917,7 +917,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
|
||||||
name: "bigger labels over 1KB",
|
name: "bigger labels over 1KB",
|
||||||
lbls: func() Labels {
|
lbls: func() Labels {
|
||||||
b := NewBuilder(EmptyLabels())
|
b := NewBuilder(EmptyLabels())
|
||||||
for i := 0; i < 10; i++ {
|
for i := range 10 {
|
||||||
// Label ~50B name, 50B value.
|
// Label ~50B name, 50B value.
|
||||||
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
|
b.Set(fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i))
|
||||||
}
|
}
|
||||||
|
|
|
@ -114,9 +114,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) {
|
||||||
testValues = append(testValues, generateRandomValues()...)
|
testValues = append(testValues, generateRandomValues()...)
|
||||||
|
|
||||||
for _, r := range regexes {
|
for _, r := range regexes {
|
||||||
r := r
|
|
||||||
for _, v := range testValues {
|
for _, v := range testValues {
|
||||||
v := v
|
|
||||||
t.Run(readable(r)+` on "`+readable(v)+`"`, func(t *testing.T) {
|
t.Run(readable(r)+` on "`+readable(v)+`"`, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
m, err := NewFastRegexMatcher(r)
|
m, err := NewFastRegexMatcher(r)
|
||||||
|
@ -245,7 +243,6 @@ func TestFindSetMatches(t *testing.T) {
|
||||||
// too many combinations
|
// too many combinations
|
||||||
{"[a-z][a-z]", nil, false},
|
{"[a-z][a-z]", nil, false},
|
||||||
} {
|
} {
|
||||||
c := c
|
|
||||||
t.Run(c.pattern, func(t *testing.T) {
|
t.Run(c.pattern, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
|
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
|
||||||
|
@ -416,7 +413,6 @@ func TestStringMatcherFromRegexp(t *testing.T) {
|
||||||
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}},
|
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}},
|
||||||
{"f.?o", nil},
|
{"f.?o", nil},
|
||||||
} {
|
} {
|
||||||
c := c
|
|
||||||
t.Run(c.pattern, func(t *testing.T) {
|
t.Run(c.pattern, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
|
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
|
||||||
|
@ -683,7 +679,7 @@ func randString(randGenerator *rand.Rand, length int) string {
|
||||||
|
|
||||||
func randStrings(randGenerator *rand.Rand, many, length int) []string {
|
func randStrings(randGenerator *rand.Rand, many, length int) []string {
|
||||||
out := make([]string, 0, many)
|
out := make([]string, 0, many)
|
||||||
for i := 0; i < many; i++ {
|
for range many {
|
||||||
out = append(out, randString(randGenerator, length))
|
out = append(out, randString(randGenerator, length))
|
||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
|
|
|
@ -69,7 +69,7 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (a *Action) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
var s string
|
var s string
|
||||||
if err := unmarshal(&s); err != nil {
|
if err := unmarshal(&s); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -105,7 +105,7 @@ type Config struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Config) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
*c = DefaultRelabelConfig
|
*c = DefaultRelabelConfig
|
||||||
type plain Config
|
type plain Config
|
||||||
if err := unmarshal((*plain)(c)); err != nil {
|
if err := unmarshal((*plain)(c)); err != nil {
|
||||||
|
@ -207,7 +207,7 @@ func MustNewRegexp(s string) Regexp {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||||
func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (re *Regexp) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
var s string
|
var s string
|
||||||
if err := unmarshal(&s); err != nil {
|
if err := unmarshal(&s); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -221,7 +221,7 @@ func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarshalYAML implements the yaml.Marshaler interface.
|
// MarshalYAML implements the yaml.Marshaler interface.
|
||||||
func (re Regexp) MarshalYAML() (interface{}, error) {
|
func (re Regexp) MarshalYAML() (any, error) {
|
||||||
if re.String() != "" {
|
if re.String() != "" {
|
||||||
return re.String(), nil
|
return re.String(), nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -436,10 +436,7 @@ func (p *OpenMetricsParser) nextToken() token {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *OpenMetricsParser) parseError(exp string, got token) error {
|
func (p *OpenMetricsParser) parseError(exp string, got token) error {
|
||||||
e := p.l.i + 1
|
e := min(len(p.l.b), p.l.i+1)
|
||||||
if len(p.l.b) < e {
|
|
||||||
e = len(p.l.b)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
|
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -291,10 +291,7 @@ func (p *PromParser) nextToken() token {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *PromParser) parseError(exp string, got token) error {
|
func (p *PromParser) parseError(exp string, got token) error {
|
||||||
e := p.l.i + 1
|
e := min(len(p.l.b), p.l.i+1)
|
||||||
if len(p.l.b) < e {
|
|
||||||
e = len(p.l.b)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
|
return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -36,7 +36,7 @@ import (
|
||||||
|
|
||||||
// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
|
// floatFormatBufPool is exclusively used in formatOpenMetricsFloat.
|
||||||
var floatFormatBufPool = sync.Pool{
|
var floatFormatBufPool = sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() any {
|
||||||
// To contain at most 17 digits and additional syntax for a float64.
|
// To contain at most 17 digits and additional syntax for a float64.
|
||||||
b := make([]byte, 0, 24)
|
b := make([]byte, 0, 24)
|
||||||
return &b
|
return &b
|
||||||
|
|
|
@ -130,7 +130,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
|
||||||
|
|
||||||
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
|
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
|
||||||
spans := make([]histogram.Span, len(s))
|
spans := make([]histogram.Span, len(s))
|
||||||
for i := 0; i < len(s); i++ {
|
for i := range s {
|
||||||
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -183,7 +183,7 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram
|
||||||
|
|
||||||
func spansToSpansProto(s []histogram.Span) []BucketSpan {
|
func spansToSpansProto(s []histogram.Span) []BucketSpan {
|
||||||
spans := make([]BucketSpan, len(s))
|
spans := make([]BucketSpan, len(s))
|
||||||
for i := 0; i < len(s); i++ {
|
for i := range s {
|
||||||
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -177,7 +177,7 @@ func TestMetricStreamingDecoder(t *testing.T) {
|
||||||
func TestMetricStreamingDecoder_LabelsCorruption(t *testing.T) {
|
func TestMetricStreamingDecoder_LabelsCorruption(t *testing.T) {
|
||||||
lastScrapeSize := 0
|
lastScrapeSize := 0
|
||||||
var allPreviousLabels []labels.Labels
|
var allPreviousLabels []labels.Labels
|
||||||
buffers := pool.New(128, 1024, 2, func(sz int) interface{} { return make([]byte, 0, sz) })
|
buffers := pool.New(128, 1024, 2, func(sz int) any { return make([]byte, 0, sz) })
|
||||||
builder := labels.NewScratchBuilder(0)
|
builder := labels.NewScratchBuilder(0)
|
||||||
for _, labelsCount := range []int{1, 2, 3, 5, 8, 5, 3, 2, 1} {
|
for _, labelsCount := range []int{1, 2, 3, 5, 8, 5, 3, 2, 1} {
|
||||||
// Get buffer from pool like in scrape.go
|
// Get buffer from pool like in scrape.go
|
||||||
|
@ -230,7 +230,7 @@ func generateMetricFamilyText(labelsCount int) string {
|
||||||
randomName := fmt.Sprintf("metric_%d", rand.Intn(1000))
|
randomName := fmt.Sprintf("metric_%d", rand.Intn(1000))
|
||||||
randomHelp := fmt.Sprintf("Test metric to demonstrate forced corruption %d.", rand.Intn(1000))
|
randomHelp := fmt.Sprintf("Test metric to demonstrate forced corruption %d.", rand.Intn(1000))
|
||||||
labels10 := ""
|
labels10 := ""
|
||||||
for i := 0; i < labelsCount; i++ {
|
for range labelsCount {
|
||||||
labels10 += generateLabels()
|
labels10 += generateLabels()
|
||||||
}
|
}
|
||||||
return fmt.Sprintf(`name: "%s"
|
return fmt.Sprintf(`name: "%s"
|
||||||
|
|
|
@ -142,7 +142,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
|
||||||
|
|
||||||
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
|
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
|
||||||
spans := make([]histogram.Span, len(s))
|
spans := make([]histogram.Span, len(s))
|
||||||
for i := 0; i < len(s); i++ {
|
for i := range s {
|
||||||
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -200,7 +200,7 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
spans := make([]BucketSpan, len(s))
|
spans := make([]BucketSpan, len(s))
|
||||||
for i := 0; i < len(s); i++ {
|
for i := range s {
|
||||||
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -43,24 +43,24 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
|
||||||
// These metrics will have data for all test time range
|
// These metrics will have data for all test time range
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
|
metrics = append(metrics, labels.FromStrings("__name__", "a_one"))
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
|
metrics = append(metrics, labels.FromStrings("__name__", "b_one"))
|
||||||
for j := 0; j < 10; j++ {
|
for j := range 10 {
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", strconv.Itoa(j)))
|
metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", strconv.Itoa(j)))
|
||||||
}
|
}
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", "+Inf"))
|
metrics = append(metrics, labels.FromStrings("__name__", "h_one", "le", "+Inf"))
|
||||||
|
|
||||||
for i := 0; i < 10; i++ {
|
for i := range 10 {
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "a_ten", "l", strconv.Itoa(i)))
|
metrics = append(metrics, labels.FromStrings("__name__", "a_ten", "l", strconv.Itoa(i)))
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "b_ten", "l", strconv.Itoa(i)))
|
metrics = append(metrics, labels.FromStrings("__name__", "b_ten", "l", strconv.Itoa(i)))
|
||||||
for j := 0; j < 10; j++ {
|
for j := range 10 {
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", strconv.Itoa(j)))
|
metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", strconv.Itoa(j)))
|
||||||
}
|
}
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", "+Inf"))
|
metrics = append(metrics, labels.FromStrings("__name__", "h_ten", "l", strconv.Itoa(i), "le", "+Inf"))
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < 100; i++ {
|
for i := range 100 {
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "a_hundred", "l", strconv.Itoa(i)))
|
metrics = append(metrics, labels.FromStrings("__name__", "a_hundred", "l", strconv.Itoa(i)))
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "b_hundred", "l", strconv.Itoa(i)))
|
metrics = append(metrics, labels.FromStrings("__name__", "b_hundred", "l", strconv.Itoa(i)))
|
||||||
for j := 0; j < 10; j++ {
|
for j := range 10 {
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", strconv.Itoa(j)))
|
metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", strconv.Itoa(j)))
|
||||||
}
|
}
|
||||||
metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", "+Inf"))
|
metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", "+Inf"))
|
||||||
|
@ -70,7 +70,7 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
|
||||||
// Number points for each different label value of "l" for the sparse series
|
// Number points for each different label value of "l" for the sparse series
|
||||||
pointsPerSparseSeries := numIntervals / 50
|
pointsPerSparseSeries := numIntervals / 50
|
||||||
|
|
||||||
for s := 0; s < numIntervals; s++ {
|
for s := range numIntervals {
|
||||||
a := stor.Appender(context.Background())
|
a := stor.Appender(context.Background())
|
||||||
ts := int64(s * interval)
|
ts := int64(s * interval)
|
||||||
for i, metric := range metrics {
|
for i, metric := range metrics {
|
||||||
|
@ -525,7 +525,7 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
|
||||||
// Generate http_server_request_duration_seconds_count metrics with instance and job labels, and http_status_code label.
|
// Generate http_server_request_duration_seconds_count metrics with instance and job labels, and http_status_code label.
|
||||||
// the classic target_info metrics is gauge type.
|
// the classic target_info metrics is gauge type.
|
||||||
metrics := make([]labels.Labels, 0, infoSeriesNum+len(statusCodes))
|
metrics := make([]labels.Labels, 0, infoSeriesNum+len(statusCodes))
|
||||||
for i := 0; i < infoSeriesNum; i++ {
|
for i := range infoSeriesNum {
|
||||||
clusterName := "us-east"
|
clusterName := "us-east"
|
||||||
if i >= infoSeriesNum/2 {
|
if i >= infoSeriesNum/2 {
|
||||||
clusterName = "eu-south"
|
clusterName = "eu-south"
|
||||||
|
@ -550,7 +550,7 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
|
||||||
// Append the generated metrics and samples to the storage.
|
// Append the generated metrics and samples to the storage.
|
||||||
refs := make([]storage.SeriesRef, len(metrics))
|
refs := make([]storage.SeriesRef, len(metrics))
|
||||||
|
|
||||||
for i := 0; i < numIntervals; i++ {
|
for i := range numIntervals {
|
||||||
a := stor.Appender(context.Background())
|
a := stor.Appender(context.Background())
|
||||||
ts := int64(i * interval)
|
ts := int64(i * interval)
|
||||||
for j, metric := range metrics[:infoSeriesNum] {
|
for j, metric := range metrics[:infoSeriesNum] {
|
||||||
|
|
|
@ -633,7 +633,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
|
||||||
logger := slog.New(l)
|
logger := slog.New(l)
|
||||||
f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append.
|
f := make([]slog.Attr, 0, 16) // Probably enough up front to not need to reallocate on append.
|
||||||
|
|
||||||
params := make(map[string]interface{}, 4)
|
params := make(map[string]any, 4)
|
||||||
params["query"] = q.q
|
params["query"] = q.q
|
||||||
if eq, ok := q.Statement().(*parser.EvalStmt); ok {
|
if eq, ok := q.Statement().(*parser.EvalStmt); ok {
|
||||||
params["start"] = formatDate(eq.Start)
|
params["start"] = formatDate(eq.Start)
|
||||||
|
@ -650,7 +650,7 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
|
||||||
f = append(f, slog.Any("spanID", span.SpanContext().SpanID()))
|
f = append(f, slog.Any("spanID", span.SpanContext().SpanID()))
|
||||||
}
|
}
|
||||||
if origin := ctx.Value(QueryOrigin{}); origin != nil {
|
if origin := ctx.Value(QueryOrigin{}); origin != nil {
|
||||||
for k, v := range origin.(map[string]interface{}) {
|
for k, v := range origin.(map[string]any) {
|
||||||
f = append(f, slog.Any(k, v))
|
f = append(f, slog.Any(k, v))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1082,7 +1082,7 @@ type evaluator struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// errorf causes a panic with the input formatted into an error.
|
// errorf causes a panic with the input formatted into an error.
|
||||||
func (ev *evaluator) errorf(format string, args ...interface{}) {
|
func (ev *evaluator) errorf(format string, args ...any) {
|
||||||
ev.error(fmt.Errorf(format, args...))
|
ev.error(fmt.Errorf(format, args...))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1792,10 +1792,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
|
||||||
mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
|
mat := make(Matrix, 0, len(selVS.Series)) // Output matrix.
|
||||||
offset := durationMilliseconds(selVS.Offset)
|
offset := durationMilliseconds(selVS.Offset)
|
||||||
selRange := durationMilliseconds(sel.Range)
|
selRange := durationMilliseconds(sel.Range)
|
||||||
stepRange := selRange
|
stepRange := min(selRange, ev.interval)
|
||||||
if stepRange > ev.interval {
|
|
||||||
stepRange = ev.interval
|
|
||||||
}
|
|
||||||
// Reuse objects across steps to save memory allocations.
|
// Reuse objects across steps to save memory allocations.
|
||||||
var floats []FPoint
|
var floats []FPoint
|
||||||
var histograms []HPoint
|
var histograms []HPoint
|
||||||
|
@ -3327,10 +3324,7 @@ seriesLoop:
|
||||||
var r float64
|
var r float64
|
||||||
switch op {
|
switch op {
|
||||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
|
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
|
||||||
k = int64(fParam)
|
k = min(int64(fParam), int64(len(inputMatrix)))
|
||||||
if k > int64(len(inputMatrix)) {
|
|
||||||
k = int64(len(inputMatrix))
|
|
||||||
}
|
|
||||||
if k < 1 {
|
if k < 1 {
|
||||||
if enh.Ts != ev.endTimestamp {
|
if enh.Ts != ev.endTimestamp {
|
||||||
advanceRemainingSeries(enh.Ts, si+1)
|
advanceRemainingSeries(enh.Ts, si+1)
|
||||||
|
@ -3697,7 +3691,7 @@ func changesMetricSchema(op parser.ItemType) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewOriginContext returns a new context with data about the origin attached.
|
// NewOriginContext returns a new context with data about the origin attached.
|
||||||
func NewOriginContext(ctx context.Context, data map[string]interface{}) context.Context {
|
func NewOriginContext(ctx context.Context, data map[string]any) context.Context {
|
||||||
return context.WithValue(ctx, QueryOrigin{}, data)
|
return context.WithValue(ctx, QueryOrigin{}, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -94,7 +94,7 @@ func TestQueryConcurrency(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < maxConcurrency; i++ {
|
for range maxConcurrency {
|
||||||
q := engine.NewTestQuery(f)
|
q := engine.NewTestQuery(f)
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -134,7 +134,7 @@ func TestQueryConcurrency(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Terminate remaining queries.
|
// Terminate remaining queries.
|
||||||
for i := 0; i < maxConcurrency; i++ {
|
for range maxConcurrency {
|
||||||
block <- struct{}{}
|
block <- struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2193,7 +2193,7 @@ func TestQueryLogger_basic(t *testing.T) {
|
||||||
engine.SetQueryLogger(f1)
|
engine.SetQueryLogger(f1)
|
||||||
queryExec()
|
queryExec()
|
||||||
logLines := getLogLines(t, ql1File)
|
logLines := getLogLines(t, ql1File)
|
||||||
require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"})
|
require.Contains(t, logLines[0], "params", map[string]any{"query": "test statement"})
|
||||||
require.Len(t, logLines, 1)
|
require.Len(t, logLines, 1)
|
||||||
|
|
||||||
l := len(logLines)
|
l := len(logLines)
|
||||||
|
@ -2246,7 +2246,7 @@ func TestQueryLogger_fields(t *testing.T) {
|
||||||
engine.SetQueryLogger(f1)
|
engine.SetQueryLogger(f1)
|
||||||
|
|
||||||
ctx, cancelCtx := context.WithCancel(context.Background())
|
ctx, cancelCtx := context.WithCancel(context.Background())
|
||||||
ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"})
|
ctx = promql.NewOriginContext(ctx, map[string]any{"foo": "bar"})
|
||||||
defer cancelCtx()
|
defer cancelCtx()
|
||||||
query := engine.NewTestQuery(func(ctx context.Context) error {
|
query := engine.NewTestQuery(func(ctx context.Context) error {
|
||||||
return contextDone(ctx, "test statement execution")
|
return contextDone(ctx, "test statement execution")
|
||||||
|
@ -2279,7 +2279,7 @@ func TestQueryLogger_error(t *testing.T) {
|
||||||
engine.SetQueryLogger(f1)
|
engine.SetQueryLogger(f1)
|
||||||
|
|
||||||
ctx, cancelCtx := context.WithCancel(context.Background())
|
ctx, cancelCtx := context.WithCancel(context.Background())
|
||||||
ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"})
|
ctx = promql.NewOriginContext(ctx, map[string]any{"foo": "bar"})
|
||||||
defer cancelCtx()
|
defer cancelCtx()
|
||||||
testErr := errors.New("failure")
|
testErr := errors.New("failure")
|
||||||
query := engine.NewTestQuery(func(context.Context) error {
|
query := engine.NewTestQuery(func(context.Context) error {
|
||||||
|
@ -2291,7 +2291,7 @@ func TestQueryLogger_error(t *testing.T) {
|
||||||
|
|
||||||
logLines := getLogLines(t, ql1File)
|
logLines := getLogLines(t, ql1File)
|
||||||
require.Contains(t, logLines[0], "error", testErr)
|
require.Contains(t, logLines[0], "error", testErr)
|
||||||
require.Contains(t, logLines[0], "params", map[string]interface{}{"query": "test statement"})
|
require.Contains(t, logLines[0], "params", map[string]any{"query": "test statement"})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
|
func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {
|
||||||
|
@ -3339,7 +3339,6 @@ metric 0 1 2
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
c := c
|
|
||||||
t.Run(c.name, func(t *testing.T) {
|
t.Run(c.name, func(t *testing.T) {
|
||||||
engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
|
engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
|
||||||
storage := promqltest.LoadedStorage(t, load)
|
storage := promqltest.LoadedStorage(t, load)
|
||||||
|
@ -3987,7 +3986,7 @@ func TestSubQueryHistogramsCopy(t *testing.T) {
|
||||||
testQuery := `rate({__name__="http_request_duration_seconds"}[3m])`
|
testQuery := `rate({__name__="http_request_duration_seconds"}[3m])`
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
for i := 0; i < 100; i++ {
|
for range 100 {
|
||||||
queryable := promqltest.LoadedStorage(t, load)
|
queryable := promqltest.LoadedStorage(t, load)
|
||||||
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
|
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
|
||||||
|
|
||||||
|
@ -3998,7 +3997,7 @@ func TestSubQueryHistogramsCopy(t *testing.T) {
|
||||||
queryable.Close()
|
queryable.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < 100; i++ {
|
for range 100 {
|
||||||
queryable := promqltest.LoadedStorage(t, load)
|
queryable := promqltest.LoadedStorage(t, load)
|
||||||
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
|
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
|
||||||
|
|
||||||
|
|
|
@ -230,10 +230,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra
|
||||||
// First iteration to find out two things:
|
// First iteration to find out two things:
|
||||||
// - What's the smallest relevant schema?
|
// - What's the smallest relevant schema?
|
||||||
// - Are all data points histograms?
|
// - Are all data points histograms?
|
||||||
minSchema := prev.Schema
|
minSchema := min(last.Schema, prev.Schema)
|
||||||
if last.Schema < minSchema {
|
|
||||||
minSchema = last.Schema
|
|
||||||
}
|
|
||||||
for _, currPoint := range points[1 : len(points)-1] {
|
for _, currPoint := range points[1 : len(points)-1] {
|
||||||
curr := currPoint.H
|
curr := currPoint.H
|
||||||
if curr == nil {
|
if curr == nil {
|
||||||
|
@ -1893,11 +1890,11 @@ func (s vectorByValueHeap) Swap(i, j int) {
|
||||||
s[i], s[j] = s[j], s[i]
|
s[i], s[j] = s[j], s[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *vectorByValueHeap) Push(x interface{}) {
|
func (s *vectorByValueHeap) Push(x any) {
|
||||||
*s = append(*s, *(x.(*Sample)))
|
*s = append(*s, *(x.(*Sample)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *vectorByValueHeap) Pop() interface{} {
|
func (s *vectorByValueHeap) Pop() any {
|
||||||
old := *s
|
old := *s
|
||||||
n := len(old)
|
n := len(old)
|
||||||
el := old[n-1]
|
el := old[n-1]
|
||||||
|
@ -1923,11 +1920,11 @@ func (s vectorByReverseValueHeap) Swap(i, j int) {
|
||||||
s[i], s[j] = s[j], s[i]
|
s[i], s[j] = s[j], s[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *vectorByReverseValueHeap) Push(x interface{}) {
|
func (s *vectorByReverseValueHeap) Push(x any) {
|
||||||
*s = append(*s, *(x.(*Sample)))
|
*s = append(*s, *(x.(*Sample)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *vectorByReverseValueHeap) Pop() interface{} {
|
func (s *vectorByReverseValueHeap) Pop() any {
|
||||||
old := *s
|
old := *s
|
||||||
n := len(old)
|
n := len(old)
|
||||||
el := old[n-1]
|
el := old[n-1]
|
||||||
|
@ -1975,7 +1972,7 @@ func stringFromArg(e parser.Expr) string {
|
||||||
|
|
||||||
func stringSliceFromArgs(args parser.Expressions) []string {
|
func stringSliceFromArgs(args parser.Expressions) []string {
|
||||||
tmp := make([]string, len(args))
|
tmp := make([]string, len(args))
|
||||||
for i := 0; i < len(args); i++ {
|
for i := range args {
|
||||||
tmp[i] = stringFromArg(args[i])
|
tmp[i] = stringFromArg(args[i])
|
||||||
}
|
}
|
||||||
return tmp
|
return tmp
|
||||||
|
|
|
@ -50,7 +50,7 @@ func TestDeriv(t *testing.T) {
|
||||||
interval = 30 * 1000
|
interval = 30 * 1000
|
||||||
// Introduce some timestamp jitter to test 0 slope case.
|
// Introduce some timestamp jitter to test 0 slope case.
|
||||||
// https://github.com/prometheus/prometheus/issues/7180
|
// https://github.com/prometheus/prometheus/issues/7180
|
||||||
for i = 0; i < 15; i++ {
|
for i = range int64(15) {
|
||||||
jitter := 12 * i % 2
|
jitter := 12 * i % 2
|
||||||
a.Append(0, metric, start+interval*i+jitter, 1)
|
a.Append(0, metric, start+interval*i+jitter, 1)
|
||||||
}
|
}
|
||||||
|
|
|
@ -347,7 +347,7 @@ func (l *Lexer) acceptRun(valid string) {
|
||||||
|
|
||||||
// errorf returns an error token and terminates the scan by passing
|
// errorf returns an error token and terminates the scan by passing
|
||||||
// back a nil pointer that will be the next state, terminating l.NextItem.
|
// back a nil pointer that will be the next state, terminating l.NextItem.
|
||||||
func (l *Lexer) errorf(format string, args ...interface{}) stateFn {
|
func (l *Lexer) errorf(format string, args ...any) stateFn {
|
||||||
*l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)}
|
*l.itemp = Item{ERROR, l.start, fmt.Sprintf(format, args...)}
|
||||||
l.scannedItem = true
|
l.scannedItem = true
|
||||||
|
|
||||||
|
|
|
@ -34,7 +34,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var parserPool = sync.Pool{
|
var parserPool = sync.Pool{
|
||||||
New: func() interface{} {
|
New: func() any {
|
||||||
return &parser{}
|
return &parser{}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -62,7 +62,7 @@ type parser struct {
|
||||||
|
|
||||||
yyParser yyParserImpl
|
yyParser yyParserImpl
|
||||||
|
|
||||||
generatedParserResult interface{}
|
generatedParserResult any
|
||||||
parseErrors ParseErrors
|
parseErrors ParseErrors
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -273,7 +273,7 @@ func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue
|
||||||
}
|
}
|
||||||
|
|
||||||
// addParseErrf formats the error and appends it to the list of parsing errors.
|
// addParseErrf formats the error and appends it to the list of parsing errors.
|
||||||
func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...interface{}) {
|
func (p *parser) addParseErrf(positionRange posrange.PositionRange, format string, args ...any) {
|
||||||
p.addParseErr(positionRange, fmt.Errorf(format, args...))
|
p.addParseErr(positionRange, fmt.Errorf(format, args...))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -475,13 +475,13 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node, overread bool) (
|
||||||
}
|
}
|
||||||
|
|
||||||
// newMap is used when building the FloatHistogram from a map.
|
// newMap is used when building the FloatHistogram from a map.
|
||||||
func (*parser) newMap() (ret map[string]interface{}) {
|
func (*parser) newMap() (ret map[string]any) {
|
||||||
return map[string]interface{}{}
|
return map[string]any{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// mergeMaps is used to combine maps as they're used to later build the Float histogram.
|
// mergeMaps is used to combine maps as they're used to later build the Float histogram.
|
||||||
// This will merge the right map into the left map.
|
// This will merge the right map into the left map.
|
||||||
func (p *parser) mergeMaps(left, right *map[string]interface{}) (ret *map[string]interface{}) {
|
func (p *parser) mergeMaps(left, right *map[string]any) (ret *map[string]any) {
|
||||||
for key, value := range *right {
|
for key, value := range *right {
|
||||||
if _, ok := (*left)[key]; ok {
|
if _, ok := (*left)[key]; ok {
|
||||||
p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key)
|
p.addParseErrf(posrange.PositionRange{}, "duplicate key \"%s\" in histogram", key)
|
||||||
|
@ -530,7 +530,7 @@ func (*parser) histogramsSeries(base, inc *histogram.FloatHistogram, times uint6
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildHistogramFromMap is used in the grammar to take then individual parts of the histogram and complete it.
|
// buildHistogramFromMap is used in the grammar to take then individual parts of the histogram and complete it.
|
||||||
func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.FloatHistogram {
|
func (p *parser) buildHistogramFromMap(desc *map[string]any) *histogram.FloatHistogram {
|
||||||
output := &histogram.FloatHistogram{}
|
output := &histogram.FloatHistogram{}
|
||||||
|
|
||||||
val, ok := (*desc)["schema"]
|
val, ok := (*desc)["schema"]
|
||||||
|
@ -623,7 +623,7 @@ func (p *parser) buildHistogramFromMap(desc *map[string]interface{}) *histogram.
|
||||||
return output
|
return output
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]interface{}, bucketsKey, offsetKey string,
|
func (p *parser) buildHistogramBucketsAndSpans(desc *map[string]any, bucketsKey, offsetKey string,
|
||||||
) (buckets []float64, spans []histogram.Span) {
|
) (buckets []float64, spans []histogram.Span) {
|
||||||
bucketCount := 0
|
bucketCount := 0
|
||||||
val, ok := (*desc)[bucketsKey]
|
val, ok := (*desc)[bucketsKey]
|
||||||
|
@ -896,7 +896,7 @@ func parseDuration(ds string) (time.Duration, error) {
|
||||||
// parseGenerated invokes the yacc generated parser.
|
// parseGenerated invokes the yacc generated parser.
|
||||||
// The generated parser gets the provided startSymbol injected into
|
// The generated parser gets the provided startSymbol injected into
|
||||||
// the lexer stream, based on which grammar will be used.
|
// the lexer stream, based on which grammar will be used.
|
||||||
func (p *parser) parseGenerated(startSymbol ItemType) interface{} {
|
func (p *parser) parseGenerated(startSymbol ItemType) any {
|
||||||
p.InjectItem(startSymbol)
|
p.InjectItem(startSymbol)
|
||||||
|
|
||||||
p.yyParser.Parse(p)
|
p.yyParser.Parse(p)
|
||||||
|
|
|
@ -32,7 +32,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
func repeatError(query string, err error, start, startStep, end, endStep, count int) (errs ParseErrors) {
|
func repeatError(query string, err error, start, startStep, end, endStep, count int) (errs ParseErrors) {
|
||||||
for i := 0; i < count; i++ {
|
for i := range count {
|
||||||
errs = append(errs, ParseErr{
|
errs = append(errs, ParseErr{
|
||||||
PositionRange: posrange.PositionRange{
|
PositionRange: posrange.PositionRange{
|
||||||
Start: posrange.Pos(start + (i * startStep)),
|
Start: posrange.Pos(start + (i * startStep)),
|
||||||
|
|
|
@ -61,12 +61,11 @@ func TestConcurrentRangeQueries(t *testing.T) {
|
||||||
// Limit the number of queries running at the same time.
|
// Limit the number of queries running at the same time.
|
||||||
const numConcurrent = 4
|
const numConcurrent = 4
|
||||||
sem := make(chan struct{}, numConcurrent)
|
sem := make(chan struct{}, numConcurrent)
|
||||||
for i := 0; i < numConcurrent; i++ {
|
for range numConcurrent {
|
||||||
sem <- struct{}{}
|
sem <- struct{}{}
|
||||||
}
|
}
|
||||||
var g errgroup.Group
|
var g errgroup.Group
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
c := c
|
|
||||||
if strings.Contains(c.expr, "count_values") && c.steps > 10 {
|
if strings.Contains(c.expr, "count_values") && c.steps > 10 {
|
||||||
continue // This test is too big to run with -race.
|
continue // This test is too big to run with -race.
|
||||||
}
|
}
|
||||||
|
|
|
@ -219,7 +219,7 @@ func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) }
|
||||||
//go:embed testdata
|
//go:embed testdata
|
||||||
var testsFs embed.FS
|
var testsFs embed.FS
|
||||||
|
|
||||||
func raise(line int, format string, v ...interface{}) error {
|
func raise(line int, format string, v ...any) error {
|
||||||
return &parser.ParseErr{
|
return &parser.ParseErr{
|
||||||
LineOffset: line,
|
LineOffset: line,
|
||||||
Err: fmt.Errorf(format, v...),
|
Err: fmt.Errorf(format, v...),
|
||||||
|
@ -1527,7 +1527,7 @@ func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
|
||||||
func (ll *LazyLoader) parse(input string) error {
|
func (ll *LazyLoader) parse(input string) error {
|
||||||
lines := getLines(input)
|
lines := getLines(input)
|
||||||
// Accepts only 'load' command.
|
// Accepts only 'load' command.
|
||||||
for i := 0; i < len(lines); i++ {
|
for i := range lines {
|
||||||
l := lines[i]
|
l := lines[i]
|
||||||
if len(l) == 0 {
|
if len(l) == 0 {
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -195,7 +195,7 @@ func newJSONEntry(query string, logger *slog.Logger) []byte {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) {
|
func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) {
|
||||||
for i := 0; i < maxConcurrent; i++ {
|
for i := range maxConcurrent {
|
||||||
tracker.getNextIndex <- 1 + (i * entrySize)
|
tracker.getNextIndex <- 1 + (i * entrySize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -48,7 +48,7 @@ func TestQueryLogging(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for inserts of queries.
|
// Check for inserts of queries.
|
||||||
for i := 0; i < 4; i++ {
|
for i := range 4 {
|
||||||
start := 1 + i*entrySize
|
start := 1 + i*entrySize
|
||||||
end := start + entrySize
|
end := start + entrySize
|
||||||
|
|
||||||
|
@ -60,7 +60,7 @@ func TestQueryLogging(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if all queries have been deleted.
|
// Check if all queries have been deleted.
|
||||||
for i := 0; i < 4; i++ {
|
for i := range 4 {
|
||||||
queryLogger.Delete(1 + i*entrySize)
|
queryLogger.Delete(1 + i*entrySize)
|
||||||
}
|
}
|
||||||
require.True(t, regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1:1+entrySize*4]),
|
require.True(t, regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1:1+entrySize*4]),
|
||||||
|
@ -94,7 +94,7 @@ func TestIndexReuse(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check all bytes and verify new query was inserted at index 2
|
// Check all bytes and verify new query was inserted at index 2
|
||||||
for i := 0; i < 3; i++ {
|
for i := range 3 {
|
||||||
start := 1 + i*entrySize
|
start := 1 + i*entrySize
|
||||||
end := start + entrySize
|
end := start + entrySize
|
||||||
|
|
||||||
|
|
|
@ -45,7 +45,7 @@ func (s String) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s String) MarshalJSON() ([]byte, error) {
|
func (s String) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V})
|
return json.Marshal([...]any{float64(s.T) / 1000, s.V})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scalar is a data point that's explicitly not associated with a metric.
|
// Scalar is a data point that's explicitly not associated with a metric.
|
||||||
|
@ -61,7 +61,7 @@ func (s Scalar) String() string {
|
||||||
|
|
||||||
func (s Scalar) MarshalJSON() ([]byte, error) {
|
func (s Scalar) MarshalJSON() ([]byte, error) {
|
||||||
v := strconv.FormatFloat(s.V, 'f', -1, 64)
|
v := strconv.FormatFloat(s.V, 'f', -1, 64)
|
||||||
return json.Marshal([...]interface{}{float64(s.T) / 1000, v})
|
return json.Marshal([...]any{float64(s.T) / 1000, v})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Series is a stream of data points belonging to a metric.
|
// Series is a stream of data points belonging to a metric.
|
||||||
|
@ -111,7 +111,7 @@ func (p FPoint) String() string {
|
||||||
// timestamp.
|
// timestamp.
|
||||||
func (p FPoint) MarshalJSON() ([]byte, error) {
|
func (p FPoint) MarshalJSON() ([]byte, error) {
|
||||||
v := strconv.FormatFloat(p.F, 'f', -1, 64)
|
v := strconv.FormatFloat(p.F, 'f', -1, 64)
|
||||||
return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
|
return json.Marshal([...]any{float64(p.T) / 1000, v})
|
||||||
}
|
}
|
||||||
|
|
||||||
// HPoint represents a single histogram data point for a given timestamp.
|
// HPoint represents a single histogram data point for a given timestamp.
|
||||||
|
@ -136,9 +136,9 @@ func (p HPoint) String() string {
|
||||||
// timestamp.
|
// timestamp.
|
||||||
func (p HPoint) MarshalJSON() ([]byte, error) {
|
func (p HPoint) MarshalJSON() ([]byte, error) {
|
||||||
h := struct {
|
h := struct {
|
||||||
Count string `json:"count"`
|
Count string `json:"count"`
|
||||||
Sum string `json:"sum"`
|
Sum string `json:"sum"`
|
||||||
Buckets [][]interface{} `json:"buckets,omitempty"`
|
Buckets [][]any `json:"buckets,omitempty"`
|
||||||
}{
|
}{
|
||||||
Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64),
|
Count: strconv.FormatFloat(p.H.Count, 'f', -1, 64),
|
||||||
Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64),
|
Sum: strconv.FormatFloat(p.H.Sum, 'f', -1, 64),
|
||||||
|
@ -161,7 +161,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
|
||||||
boundaries = 0 // Inclusive only on upper end AKA left open.
|
boundaries = 0 // Inclusive only on upper end AKA left open.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
bucketToMarshal := []interface{}{
|
bucketToMarshal := []any{
|
||||||
boundaries,
|
boundaries,
|
||||||
strconv.FormatFloat(bucket.Lower, 'f', -1, 64),
|
strconv.FormatFloat(bucket.Lower, 'f', -1, 64),
|
||||||
strconv.FormatFloat(bucket.Upper, 'f', -1, 64),
|
strconv.FormatFloat(bucket.Upper, 'f', -1, 64),
|
||||||
|
@ -169,7 +169,7 @@ func (p HPoint) MarshalJSON() ([]byte, error) {
|
||||||
}
|
}
|
||||||
h.Buckets = append(h.Buckets, bucketToMarshal)
|
h.Buckets = append(h.Buckets, bucketToMarshal)
|
||||||
}
|
}
|
||||||
return json.Marshal([...]interface{}{float64(p.T) / 1000, h})
|
return json.Marshal([...]any{float64(p.T) / 1000, h})
|
||||||
}
|
}
|
||||||
|
|
||||||
// size returns the size of the HPoint compared to the size of an FPoint.
|
// size returns the size of the HPoint compared to the size of an FPoint.
|
||||||
|
|
|
@ -596,10 +596,7 @@ func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay
|
||||||
if alert.needsSending(ts, resendDelay) {
|
if alert.needsSending(ts, resendDelay) {
|
||||||
alert.LastSentAt = ts
|
alert.LastSentAt = ts
|
||||||
// Allow for two Eval or Alertmanager send failures.
|
// Allow for two Eval or Alertmanager send failures.
|
||||||
delta := resendDelay
|
delta := max(interval, resendDelay)
|
||||||
if interval > resendDelay {
|
|
||||||
delta = interval
|
|
||||||
}
|
|
||||||
alert.ValidUntil = ts.Add(4 * delta)
|
alert.ValidUntil = ts.Add(4 * delta)
|
||||||
anew := *alert
|
anew := *alert
|
||||||
// The notifier re-uses the labels slice, hence make a copy.
|
// The notifier re-uses the labels slice, hence make a copy.
|
||||||
|
|
|
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"maps"
|
||||||
"math"
|
"math"
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -215,7 +216,7 @@ func (g *Group) run(ctx context.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx = promql.NewOriginContext(ctx, map[string]interface{}{
|
ctx = promql.NewOriginContext(ctx, map[string]any{
|
||||||
"ruleGroup": map[string]string{
|
"ruleGroup": map[string]string{
|
||||||
"file": g.File(),
|
"file": g.File(),
|
||||||
"name": g.Name(),
|
"name": g.Name(),
|
||||||
|
@ -482,9 +483,7 @@ func (g *Group) CopyState(from *Group) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for fp, a := range far.active {
|
maps.Copy(ar.active, far.active)
|
||||||
ar.active[fp] = a
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle deleted and unmatched duplicate rules.
|
// Handle deleted and unmatched duplicate rules.
|
||||||
|
|
|
@ -18,6 +18,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
maps0 "maps"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
|
@ -582,9 +583,7 @@ func FromMaps(maps ...map[string]string) labels.Labels {
|
||||||
mLables := make(map[string]string)
|
mLables := make(map[string]string)
|
||||||
|
|
||||||
for _, m := range maps {
|
for _, m := range maps {
|
||||||
for k, v := range m {
|
maps0.Copy(mLables, m)
|
||||||
mLables[k] = v
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return labels.FromMap(mLables)
|
return labels.FromMap(mLables)
|
||||||
|
|
|
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
|
"maps"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
@ -821,9 +822,7 @@ func TestUpdate(t *testing.T) {
|
||||||
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil)
|
err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, labels.EmptyLabels(), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for h, g := range ruleManager.groups {
|
maps.Copy(ogs, ruleManager.groups)
|
||||||
ogs[h] = g
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update interval and reload.
|
// Update interval and reload.
|
||||||
for i, g := range rgs.Groups {
|
for i, g := range rgs.Groups {
|
||||||
|
@ -2480,7 +2479,6 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
|
||||||
// Evaluate groups concurrently (like they normally do).
|
// Evaluate groups concurrently (like they normally do).
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for _, group := range groups {
|
for _, group := range groups {
|
||||||
group := group
|
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
|
@ -2532,7 +2530,7 @@ func TestGroup_Eval_RaceConditionOnStoppingGroupEvaluationWhileRulesAreEvaluated
|
||||||
<-ruleManager.block
|
<-ruleManager.block
|
||||||
|
|
||||||
// Update the group a decent number of times to simulate start and stopping in the middle of an evaluation.
|
// Update the group a decent number of times to simulate start and stopping in the middle of an evaluation.
|
||||||
for i := 0; i < 10; i++ {
|
for range 10 {
|
||||||
err := ruleManager.Update(time.Second, files, labels.EmptyLabels(), "", nil)
|
err := ruleManager.Update(time.Second, files, labels.EmptyLabels(), "", nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
|
|
@ -62,7 +62,7 @@ func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(str
|
||||||
graceShut: make(chan struct{}),
|
graceShut: make(chan struct{}),
|
||||||
triggerReload: make(chan struct{}, 1),
|
triggerReload: make(chan struct{}, 1),
|
||||||
metrics: sm,
|
metrics: sm,
|
||||||
buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }),
|
buffers: pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) }),
|
||||||
}
|
}
|
||||||
|
|
||||||
m.metrics.setTargetMetadataCacheGatherer(m)
|
m.metrics.setTargetMetadataCacheGatherer(m)
|
||||||
|
|
|
@ -594,7 +594,7 @@ func TestManagerTargetsUpdates(t *testing.T) {
|
||||||
defer m.Stop()
|
defer m.Stop()
|
||||||
|
|
||||||
tgSent := make(map[string][]*targetgroup.Group)
|
tgSent := make(map[string][]*targetgroup.Group)
|
||||||
for x := 0; x < 10; x++ {
|
for x := range 10 {
|
||||||
tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
|
tgSent[strconv.Itoa(x)] = []*targetgroup.Group{
|
||||||
{
|
{
|
||||||
Source: strconv.Itoa(x),
|
Source: strconv.Itoa(x),
|
||||||
|
@ -1056,7 +1056,7 @@ scrape_configs:
|
||||||
func TestUnregisterMetrics(t *testing.T) {
|
func TestUnregisterMetrics(t *testing.T) {
|
||||||
reg := prometheus.NewRegistry()
|
reg := prometheus.NewRegistry()
|
||||||
// Check that all metrics can be unregistered, allowing a second manager to be created.
|
// Check that all metrics can be unregistered, allowing a second manager to be created.
|
||||||
for i := 0; i < 2; i++ {
|
for range 2 {
|
||||||
opts := Options{}
|
opts := Options{}
|
||||||
manager, err := NewManager(&opts, nil, nil, nil, reg)
|
manager, err := NewManager(&opts, nil, nil, nil, reg)
|
||||||
require.NotNil(t, manager)
|
require.NotNil(t, manager)
|
||||||
|
|
|
@ -1262,7 +1262,7 @@ func newScrapeLoop(ctx context.Context,
|
||||||
l = promslog.NewNopLogger()
|
l = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
if buffers == nil {
|
if buffers == nil {
|
||||||
buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
|
buffers = pool.New(1e3, 1e6, 3, func(sz int) any { return make([]byte, 0, sz) })
|
||||||
}
|
}
|
||||||
if cache == nil {
|
if cache == nil {
|
||||||
cache = newScrapeCache(metrics)
|
cache = newScrapeCache(metrics)
|
||||||
|
|
|
@ -21,6 +21,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"maps"
|
||||||
"math"
|
"math"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
|
@ -459,7 +460,7 @@ func TestScrapePoolStop(t *testing.T) {
|
||||||
// clean them and the respective targets up. It must wait until each loop's
|
// clean them and the respective targets up. It must wait until each loop's
|
||||||
// stop function returned before returning itself.
|
// stop function returned before returning itself.
|
||||||
|
|
||||||
for i := 0; i < numTargets; i++ {
|
for i := range numTargets {
|
||||||
t := &Target{
|
t := &Target{
|
||||||
labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
|
labels: labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
|
||||||
scrapeConfig: &config.ScrapeConfig{},
|
scrapeConfig: &config.ScrapeConfig{},
|
||||||
|
@ -547,7 +548,7 @@ func TestScrapePoolReload(t *testing.T) {
|
||||||
// loops and start new ones. A new loop must not be started before the preceding
|
// loops and start new ones. A new loop must not be started before the preceding
|
||||||
// one terminated.
|
// one terminated.
|
||||||
|
|
||||||
for i := 0; i < numTargets; i++ {
|
for i := range numTargets {
|
||||||
labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
|
labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
|
||||||
t := &Target{
|
t := &Target{
|
||||||
labels: labels,
|
labels: labels,
|
||||||
|
@ -569,9 +570,7 @@ func TestScrapePoolReload(t *testing.T) {
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
|
|
||||||
beforeTargets := map[uint64]*Target{}
|
beforeTargets := map[uint64]*Target{}
|
||||||
for h, t := range sp.activeTargets {
|
maps.Copy(beforeTargets, sp.activeTargets)
|
||||||
beforeTargets[h] = t
|
|
||||||
}
|
|
||||||
|
|
||||||
reloadTime := time.Now()
|
reloadTime := time.Now()
|
||||||
|
|
||||||
|
@ -691,7 +690,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
tgs := []*targetgroup.Group{}
|
tgs := []*targetgroup.Group{}
|
||||||
for i := 0; i < 50; i++ {
|
for i := range 50 {
|
||||||
tgs = append(tgs,
|
tgs = append(tgs,
|
||||||
&targetgroup.Group{
|
&targetgroup.Group{
|
||||||
Targets: []model.LabelSet{
|
Targets: []model.LabelSet{
|
||||||
|
@ -904,7 +903,7 @@ func TestScrapePoolRaces(t *testing.T) {
|
||||||
require.Len(t, active, expectedActive, "Invalid number of active targets")
|
require.Len(t, active, expectedActive, "Invalid number of active targets")
|
||||||
require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets")
|
require.Len(t, dropped, expectedDropped, "Invalid number of dropped targets")
|
||||||
|
|
||||||
for i := 0; i < 20; i++ {
|
for range 20 {
|
||||||
time.Sleep(10 * time.Millisecond)
|
time.Sleep(10 * time.Millisecond)
|
||||||
sp.reload(newConfig())
|
sp.reload(newConfig())
|
||||||
}
|
}
|
||||||
|
@ -1437,7 +1436,7 @@ func makeTestGauges(n int) []byte {
|
||||||
sb := bytes.Buffer{}
|
sb := bytes.Buffer{}
|
||||||
fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
|
fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
|
||||||
fmt.Fprintf(&sb, "# HELP metric_a help text\n")
|
fmt.Fprintf(&sb, "# HELP metric_a help text\n")
|
||||||
for i := 0; i < n; i++ {
|
for i := range n {
|
||||||
fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
|
fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(&sb, "# EOF\n")
|
fmt.Fprintf(&sb, "# EOF\n")
|
||||||
|
@ -1817,7 +1816,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
|
||||||
numScrapes++
|
numScrapes++
|
||||||
if numScrapes < 5 {
|
if numScrapes < 5 {
|
||||||
s := ""
|
s := ""
|
||||||
for i := 0; i < 500; i++ {
|
for i := range 500 {
|
||||||
s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
|
s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes)
|
||||||
}
|
}
|
||||||
w.Write([]byte(s + "&"))
|
w.Write([]byte(s + "&"))
|
||||||
|
@ -1929,7 +1928,7 @@ func TestScrapeLoopAppend(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
|
func requireEqual(t *testing.T, expected, actual any, msgAndArgs ...any) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
testutil.RequireEqualWithOptions(t, expected, actual,
|
testutil.RequireEqualWithOptions(t, expected, actual,
|
||||||
[]cmp.Option{cmp.Comparer(equalFloatSamples), cmp.AllowUnexported(histogramSample{})},
|
[]cmp.Option{cmp.Comparer(equalFloatSamples), cmp.AllowUnexported(histogramSample{})},
|
||||||
|
@ -3894,7 +3893,7 @@ func TestReuseCacheRace(t *testing.T) {
|
||||||
MetricNameValidationScheme: model.UTF8Validation,
|
MetricNameValidationScheme: model.UTF8Validation,
|
||||||
MetricNameEscapingScheme: model.AllowUTF8,
|
MetricNameEscapingScheme: model.AllowUTF8,
|
||||||
}
|
}
|
||||||
buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
|
buffers = pool.New(1e3, 100e6, 3, func(sz int) any { return make([]byte, 0, sz) })
|
||||||
sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
|
sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
|
||||||
t1 = &Target{
|
t1 = &Target{
|
||||||
labels: labels.FromStrings("labelNew", "nameNew"),
|
labels: labels.FromStrings("labelNew", "nameNew"),
|
||||||
|
@ -4357,7 +4356,7 @@ func TestConvertClassicHistogramsToNHCB(t *testing.T) {
|
||||||
`, name, value)
|
`, name, value)
|
||||||
}
|
}
|
||||||
genTestHistText := func(name string, withMetadata bool) string {
|
genTestHistText := func(name string, withMetadata bool) string {
|
||||||
data := map[string]interface{}{
|
data := map[string]any{
|
||||||
"name": name,
|
"name": name,
|
||||||
}
|
}
|
||||||
b := &bytes.Buffer{}
|
b := &bytes.Buffer{}
|
||||||
|
@ -5145,7 +5144,7 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
|
||||||
{metricsCount: 100000},
|
{metricsCount: 100000},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < len(scenarios); i++ {
|
for i := range scenarios {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
var name string
|
var name string
|
||||||
gw := gzip.NewWriter(&buf)
|
gw := gzip.NewWriter(&buf)
|
||||||
|
@ -5266,7 +5265,6 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for name, tc := range testcases {
|
for name, tc := range testcases {
|
||||||
tc := tc
|
|
||||||
t.Run(name, func(t *testing.T) {
|
t.Run(name, func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema)
|
testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema)
|
||||||
|
|
|
@ -489,7 +489,7 @@ scrape_configs:
|
||||||
for _, nTargets := range []int{1, 10, 100} {
|
for _, nTargets := range []int{1, 10, 100} {
|
||||||
b.Run(fmt.Sprintf("%d_targets", nTargets), func(b *testing.B) {
|
b.Run(fmt.Sprintf("%d_targets", nTargets), func(b *testing.B) {
|
||||||
targets := []model.LabelSet{}
|
targets := []model.LabelSet{}
|
||||||
for i := 0; i < nTargets; i++ {
|
for i := range nTargets {
|
||||||
labels := model.LabelSet{
|
labels := model.LabelSet{
|
||||||
model.AddressLabel: model.LabelValue(fmt.Sprintf("localhost:%d", i)),
|
model.AddressLabel: model.LabelValue(fmt.Sprintf("localhost:%d", i)),
|
||||||
"__meta_kubernetes_namespace": "some_namespace",
|
"__meta_kubernetes_namespace": "some_namespace",
|
||||||
|
@ -501,7 +501,7 @@ scrape_configs:
|
||||||
"__meta_kubernetes_pod_phase": "Running",
|
"__meta_kubernetes_pod_phase": "Running",
|
||||||
}
|
}
|
||||||
// Add some more labels, because Kubernetes SD generates a lot
|
// Add some more labels, because Kubernetes SD generates a lot
|
||||||
for i := 0; i < 10; i++ {
|
for i := range 10 {
|
||||||
labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_label_extra%d", i))] = "a_label_abcdefgh"
|
labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_label_extra%d", i))] = "a_label_abcdefgh"
|
||||||
labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_labelpresent_extra%d", i))] = "true"
|
labels[model.LabelName(fmt.Sprintf("__meta_kubernetes_pod_labelpresent_extra%d", i))] = "true"
|
||||||
}
|
}
|
||||||
|
|
|
@ -233,10 +233,7 @@ func (q *mergeGenericQuerier) mergeResults(lq labelGenericQueriers, hints *Label
|
||||||
}
|
}
|
||||||
|
|
||||||
func mergeStrings(a, b []string) []string {
|
func mergeStrings(a, b []string) []string {
|
||||||
maxl := len(a)
|
maxl := max(len(b), len(a))
|
||||||
if len(b) > len(a) {
|
|
||||||
maxl = len(b)
|
|
||||||
}
|
|
||||||
res := make([]string, 0, maxl*10/9)
|
res := make([]string, 0, maxl*10/9)
|
||||||
|
|
||||||
for len(a) > 0 && len(b) > 0 {
|
for len(a) > 0 && len(b) > 0 {
|
||||||
|
@ -440,11 +437,11 @@ func (h genericSeriesSetHeap) Less(i, j int) bool {
|
||||||
return labels.Compare(a, b) < 0
|
return labels.Compare(a, b) < 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *genericSeriesSetHeap) Push(x interface{}) {
|
func (h *genericSeriesSetHeap) Push(x any) {
|
||||||
*h = append(*h, x.(genericSeriesSet))
|
*h = append(*h, x.(genericSeriesSet))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *genericSeriesSetHeap) Pop() interface{} {
|
func (h *genericSeriesSetHeap) Pop() any {
|
||||||
old := *h
|
old := *h
|
||||||
n := len(old)
|
n := len(old)
|
||||||
x := old[n-1]
|
x := old[n-1]
|
||||||
|
@ -698,11 +695,11 @@ func (h samplesIteratorHeap) Less(i, j int) bool {
|
||||||
return h[i].AtT() < h[j].AtT()
|
return h[i].AtT() < h[j].AtT()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *samplesIteratorHeap) Push(x interface{}) {
|
func (h *samplesIteratorHeap) Push(x any) {
|
||||||
*h = append(*h, x.(chunkenc.Iterator))
|
*h = append(*h, x.(chunkenc.Iterator))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *samplesIteratorHeap) Pop() interface{} {
|
func (h *samplesIteratorHeap) Pop() any {
|
||||||
old := *h
|
old := *h
|
||||||
n := len(old)
|
n := len(old)
|
||||||
x := old[n-1]
|
x := old[n-1]
|
||||||
|
@ -846,11 +843,11 @@ func (h chunkIteratorHeap) Less(i, j int) bool {
|
||||||
return at.MinTime < bt.MinTime
|
return at.MinTime < bt.MinTime
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *chunkIteratorHeap) Push(x interface{}) {
|
func (h *chunkIteratorHeap) Push(x any) {
|
||||||
*h = append(*h, x.(chunks.Iterator))
|
*h = append(*h, x.(chunks.Iterator))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *chunkIteratorHeap) Pop() interface{} {
|
func (h *chunkIteratorHeap) Pop() any {
|
||||||
old := *h
|
old := *h
|
||||||
n := len(old)
|
n := len(old)
|
||||||
x := old[n-1]
|
x := old[n-1]
|
||||||
|
|
|
@ -1329,10 +1329,10 @@ func TestChainSampleIteratorSeekHistogramCounterResetHint(t *testing.T) {
|
||||||
|
|
||||||
func makeSeries(numSeries, numSamples int) []Series {
|
func makeSeries(numSeries, numSamples int) []Series {
|
||||||
series := []Series{}
|
series := []Series{}
|
||||||
for j := 0; j < numSeries; j++ {
|
for j := range numSeries {
|
||||||
labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
|
labels := labels.FromStrings("foo", fmt.Sprintf("bar%d", j))
|
||||||
samples := []chunks.Sample{}
|
samples := []chunks.Sample{}
|
||||||
for k := 0; k < numSamples; k++ {
|
for k := range numSamples {
|
||||||
samples = append(samples, fSample{t: int64(k), f: float64(k)})
|
samples = append(samples, fSample{t: int64(k), f: float64(k)})
|
||||||
}
|
}
|
||||||
series = append(series, NewListSeries(labels, samples))
|
series = append(series, NewListSeries(labels, samples))
|
||||||
|
@ -1393,9 +1393,9 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
|
||||||
func BenchmarkMergeLabelValuesWithLimit(b *testing.B) {
|
func BenchmarkMergeLabelValuesWithLimit(b *testing.B) {
|
||||||
var queriers []genericQuerier
|
var queriers []genericQuerier
|
||||||
|
|
||||||
for i := 0; i < 5; i++ {
|
for i := range 5 {
|
||||||
var lbls []string
|
var lbls []string
|
||||||
for j := 0; j < 100000; j++ {
|
for j := range 100000 {
|
||||||
lbls = append(lbls, fmt.Sprintf("querier_%d_label_%d", i, j))
|
lbls = append(lbls, fmt.Sprintf("querier_%d_label_%d", i, j))
|
||||||
}
|
}
|
||||||
q := &mockQuerier{resp: lbls}
|
q := &mockQuerier{resp: lbls}
|
||||||
|
@ -1680,7 +1680,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check slice but ignore difference between nil and empty.
|
// Check slice but ignore difference between nil and empty.
|
||||||
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) {
|
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...any) {
|
||||||
if len(a) == 0 {
|
if len(a) == 0 {
|
||||||
require.Empty(t, b, msgAndArgs...)
|
require.Empty(t, b, msgAndArgs...)
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -215,7 +215,7 @@ func (c *AzureADConfig) Validate() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalYAML unmarshal the Azure AD config yaml.
|
// UnmarshalYAML unmarshal the Azure AD config yaml.
|
||||||
func (c *AzureADConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *AzureADConfig) UnmarshalYAML(unmarshal func(any) error) error {
|
||||||
type plain AzureADConfig
|
type plain AzureADConfig
|
||||||
*c = AzureADConfig{}
|
*c = AzureADConfig{}
|
||||||
if err := unmarshal((*plain)(c)); err != nil {
|
if err := unmarshal((*plain)(c)); err != nil {
|
||||||
|
|
|
@ -1017,7 +1017,7 @@ func createSampledResponseHandler(t *testing.T, queries []*prompb.Query) http.Ha
|
||||||
var timeseries []*prompb.TimeSeries
|
var timeseries []*prompb.TimeSeries
|
||||||
|
|
||||||
// Create 2 series per query
|
// Create 2 series per query
|
||||||
for seriesIndex := 0; seriesIndex < 2; seriesIndex++ {
|
for seriesIndex := range 2 {
|
||||||
var labels []prompb.Label
|
var labels []prompb.Label
|
||||||
if queryIndex == 0 {
|
if queryIndex == 0 {
|
||||||
labels = []prompb.Label{
|
labels = []prompb.Label{
|
||||||
|
|
|
@ -537,7 +537,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
|
||||||
require.Equal(t, expected, fh)
|
require.Equal(t, expected, fh)
|
||||||
|
|
||||||
// Keep calling Next() until the end.
|
// Keep calling Next() until the end.
|
||||||
for i := 0; i < 3; i++ {
|
for range 3 {
|
||||||
require.Equal(t, chunkenc.ValHistogram, it.Next())
|
require.Equal(t, chunkenc.ValHistogram, it.Next())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1025,7 +1025,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk {
|
||||||
|
|
||||||
time := startTime
|
time := startTime
|
||||||
|
|
||||||
for i := 0; i < numTestChunks; i++ {
|
for i := range numTestChunks {
|
||||||
c := chunkenc.NewXORChunk()
|
c := chunkenc.NewXORChunk()
|
||||||
|
|
||||||
a, err := c.Appender()
|
a, err := c.Appender()
|
||||||
|
@ -1033,7 +1033,7 @@ func buildTestChunks(t *testing.T) []prompb.Chunk {
|
||||||
|
|
||||||
minTimeMs := time
|
minTimeMs := time
|
||||||
|
|
||||||
for j := 0; j < numSamplesPerTestChunk; j++ {
|
for j := range numSamplesPerTestChunk {
|
||||||
a.Append(time, float64(i+j))
|
a.Append(time, float64(i+j))
|
||||||
time += int64(1000)
|
time += int64(1000)
|
||||||
}
|
}
|
||||||
|
|
|
@ -151,7 +151,7 @@ func TestDialContextWithRandomConnections(t *testing.T) {
|
||||||
t.Run(name, func(t *testing.T) {
|
t.Run(name, func(t *testing.T) {
|
||||||
dc := tc.setup()
|
dc := tc.setup()
|
||||||
require.NotNil(t, dc)
|
require.NotNil(t, dc)
|
||||||
for i := 0; i < numberOfRuns; i++ {
|
for range numberOfRuns {
|
||||||
_, err := dc.dialContextFn()(context.Background(), testNetwork, tc.addr)
|
_, err := dc.dialContextFn()(context.Background(), testNetwork, tc.addr)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -75,7 +75,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
|
||||||
require.True(t, ok)
|
require.True(t, ok)
|
||||||
require.Equal(t, int64(1), interned.refs.Load(), "wrong interned refs count")
|
require.Equal(t, int64(1), interned.refs.Load(), "wrong interned refs count")
|
||||||
|
|
||||||
for i := 0; i < 1000; i++ {
|
for range 1000 {
|
||||||
released := make(chan struct{})
|
released := make(chan struct{})
|
||||||
go func() {
|
go func() {
|
||||||
interner.release(testString)
|
interner.release(testString)
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue