diff --git a/.github/workflows/release-npm.yml b/.github/workflows/release-npm.yml
index e4bed54a7f8..4dd92e9d0a1 100644
--- a/.github/workflows/release-npm.yml
+++ b/.github/workflows/release-npm.yml
@@ -39,12 +39,14 @@ permissions: {}
 jobs:
   # If called with version_type 'canary' or 'stable', build + publish to NPM
-  # If called with version_type 'nightly', just tag the given version with nightly tag. It was already published by the canary build.
+  # If called with version_type 'nightly', do nothing (we're not yet tagging them with the nightly tag)
   publish:
     name: Publish NPM packages
     runs-on: github-hosted-ubuntu-x64-small
     if: inputs.version_type == 'canary' || inputs.version_type == 'stable'
+    # Required for this workflow to have permission to publish NPM packages
+    environment: npm-publish
     permissions:
       contents: read
       id-token: write
@@ -130,18 +132,3 @@ jobs:
       env:
         NPM_TAG: ${{ steps.npm-tag.outputs.NPM_TAG }}
       run: ./scripts/publish-npm-packages.sh --dist-tag "$NPM_TAG" --registry 'https://registry.npmjs.org/'
-
-  # TODO: finish this step
-  tag-nightly:
-    name: Tag nightly release
-    runs-on: github-hosted-ubuntu-x64-small
-    if: inputs.version_type == 'nightly'
-
-    steps:
-      - name: Checkout workflow ref
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-
-      # TODO: tag the given release with nightly
-
diff --git a/apps/alerting/alertenrichment/go.mod b/apps/alerting/alertenrichment/go.mod
index c36ab96836a..aa670ef15ed 100644
--- a/apps/alerting/alertenrichment/go.mod
+++ b/apps/alerting/alertenrichment/go.mod
@@ -28,7 +28,7 @@ require (
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/net v0.44.0 // indirect
 	golang.org/x/text v0.29.0 // indirect
-	google.golang.org/protobuf v1.36.8 // indirect
+	google.golang.org/protobuf v1.36.9 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
diff --git a/apps/alerting/alertenrichment/go.sum b/apps/alerting/alertenrichment/go.sum
index 0f5080062e6..1ff737c6fbd 100644
--- a/apps/alerting/alertenrichment/go.sum
+++ b/apps/alerting/alertenrichment/go.sum
@@ -91,8 +91,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
-google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
+google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/apps/alerting/notifications/go.mod b/apps/alerting/notifications/go.mod
index 36fc4da5f16..4f5e6fc625c 100644
--- a/apps/alerting/notifications/go.mod
+++ b/apps/alerting/notifications/go.mod
@@ -94,7 +94,7 @@ require (
 	google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/apps/alerting/notifications/go.sum b/apps/alerting/notifications/go.sum index d8686fa8e79..793458ef166 100644 --- a/apps/alerting/notifications/go.sum +++ b/apps/alerting/notifications/go.sum @@ -284,8 +284,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go. google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/alerting/rules/go.mod b/apps/alerting/rules/go.mod index 16021cb3241..99aab42dd9a 100644 --- a/apps/alerting/rules/go.mod +++ b/apps/alerting/rules/go.mod @@ -77,7 +77,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/alerting/rules/go.sum b/apps/alerting/rules/go.sum index b34bb79ec13..396ebebc9de 100644 --- a/apps/alerting/rules/go.sum +++ b/apps/alerting/rules/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/correlations/Makefile b/apps/correlations/Makefile index 230bfd4149a..bc8d6d30cb5 100644 --- 
a/apps/correlations/Makefile +++ b/apps/correlations/Makefile @@ -6,4 +6,5 @@ generate: install-app-sdk update-app-sdk --source=./kinds/ \ --gogenpath=./pkg/apis \ --grouping=group \ + --genoperatorstate=false \ --defencoding=none \ No newline at end of file diff --git a/apps/correlations/go.mod b/apps/correlations/go.mod index 2e66709bda6..7599bad88d5 100644 --- a/apps/correlations/go.mod +++ b/apps/correlations/go.mod @@ -79,7 +79,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/correlations/go.sum b/apps/correlations/go.sum index b34bb79ec13..396ebebc9de 100644 --- a/apps/correlations/go.sum +++ b/apps/correlations/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go index 482d497e154..fe8f518b852 100644 --- a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go +++ b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type CorrelationClient struct { @@ -76,24 +75,6 @@ func (c *CorrelationClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *CorrelationClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus CorrelationStatus, opts resource.UpdateOptions) (*Correlation, error) { - return c.client.Update(ctx, &Correlation{ - TypeMeta: metav1.TypeMeta{ - Kind: CorrelationKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *CorrelationClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff 
--git a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go index d10cb114aa8..3191e415977 100644 --- a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go +++ b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go @@ -21,8 +21,6 @@ type Correlation struct { // Spec is the spec of the Correlation Spec CorrelationSpec `json:"spec" yaml:"spec"` - - Status CorrelationStatus `json:"status" yaml:"status"` } func (o *Correlation) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Correlation) SetSpec(spec any) error { } func (o *Correlation) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Correlation) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Correlation) GetSubresource(name string) (any, bool) { func (o *Correlation) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(CorrelationStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type CorrelationStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Correlation) DeepCopyInto(dst *Correlation) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *CorrelationSpec) DeepCopy() *CorrelationSpec { func (s *CorrelationSpec) DeepCopyInto(dst *CorrelationSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of CorrelationStatus -func (s *CorrelationStatus) DeepCopy() *CorrelationStatus { - cpy := &CorrelationStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies CorrelationStatus into another CorrelationStatus object -func (s *CorrelationStatus) DeepCopyInto(dst *CorrelationStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_status_gen.go b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_status_gen.go deleted file mode 100644 index 5b8bc9f8088..00000000000 --- a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_status_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -package v0alpha1 - -// +k8s:openapi-gen=true -type CorrelationstatusOperatorState struct { - // lastEvaluation is the ResourceVersion last evaluated - LastEvaluation string `json:"lastEvaluation"` - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - State CorrelationStatusOperatorStateState `json:"state"` - // descriptiveState is an optional more descriptive state field which has no requirements on format - DescriptiveState *string `json:"descriptiveState,omitempty"` - // details contains any extra information that is operator-specific - Details map[string]interface{} `json:"details,omitempty"` -} - -// NewCorrelationstatusOperatorState creates a new CorrelationstatusOperatorState object. 
-func NewCorrelationstatusOperatorState() *CorrelationstatusOperatorState { - return &CorrelationstatusOperatorState{} -} - -// +k8s:openapi-gen=true -type CorrelationStatus struct { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. - OperatorStates map[string]CorrelationstatusOperatorState `json:"operatorStates,omitempty"` - // additionalFields is reserved for future use - AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"` -} - -// NewCorrelationStatus creates a new CorrelationStatus object. -func NewCorrelationStatus() *CorrelationStatus { - return &CorrelationStatus{} -} - -// +k8s:openapi-gen=true -type CorrelationStatusOperatorStateState string - -const ( - CorrelationStatusOperatorStateStateSuccess CorrelationStatusOperatorStateState = "success" - CorrelationStatusOperatorStateStateInProgress CorrelationStatusOperatorStateState = "in_progress" - CorrelationStatusOperatorStateStateFailed CorrelationStatusOperatorStateState = "failed" -) diff --git a/apps/correlations/pkg/apis/correlation_manifest.go b/apps/correlations/pkg/apis/correlation_manifest.go index 293633bff4f..4ef118d2216 100644 --- a/apps/correlations/pkg/apis/correlation_manifest.go +++ b/apps/correlations/pkg/apis/correlation_manifest.go @@ -10,17 +10,16 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kube-openapi/pkg/spec3" - "github.com/grafana/grafana-app-sdk/app" "github.com/grafana/grafana-app-sdk/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kube-openapi/pkg/spec3" v0alpha1 "github.com/grafana/grafana/apps/correlations/pkg/apis/correlation/v0alpha1" ) var ( - rawSchemaCorrelationv0alpha1 = []byte(`{"ConfigSpec":{"additionalProperties":false,"description":"there was a deprecated field here called type, we will need to move that for conversion and provisioning","properties":{"field":{"type":"string"},"target":{"$ref":"#/components/schemas/TargetSpec"},"transformations":{"items":{"$ref":"#/components/schemas/TransformationSpec"},"type":"array"}},"required":["field","target"],"type":"object"},"Correlation":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"CorrelationType":{"enum":["query","external"],"type":"string"},"DataSourceRef":{"additionalProperties":false,"properties":{"group":{"description":"same as pluginId","type":"string"},"name":{"description":"same as grafana uid","type":"string"}},"required":["group","name"],"type":"object"},"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which has no requirements on format","type":"string"},"details":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last evaluated","type":"string"},"state":{"description":"state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine 
evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"TargetSpec":{"additionalProperties":{"additionalProperties":{},"type":"object"},"type":"object"},"TransformationSpec":{"additionalProperties":false,"properties":{"expression":{"type":"string"},"field":{"type":"string"},"mapValue":{"type":"string"},"type":{"type":"string"}},"required":["type","expression","field","mapValue"],"type":"object"},"spec":{"additionalProperties":false,"properties":{"config":{"$ref":"#/components/schemas/ConfigSpec"},"description":{"type":"string"},"label":{"type":"string"},"provisioned":{"type":"boolean"},"source_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"target_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"type":{"$ref":"#/components/schemas/CorrelationType"}},"required":["source_ds_ref","label","config","provisioned","type"],"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"additionalFields is reserved for future use","type":"object"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.","type":"object"}},"type":"object"}}`) + rawSchemaCorrelationv0alpha1 = []byte(`{"ConfigSpec":{"additionalProperties":false,"description":"there was a deprecated field here called type, we will need to move that for conversion and provisioning","properties":{"field":{"type":"string"},"target":{"$ref":"#/components/schemas/TargetSpec"},"transformations":{"items":{"$ref":"#/components/schemas/TransformationSpec"},"type":"array"}},"required":["field","target"],"type":"object"},"Correlation":{"properties":{"spec":{"$ref":"#/components/schemas/spec"}},"required":["spec"]},"CorrelationType":{"enum":["query","external"],"type":"string"},"DataSourceRef":{"additionalProperties":false,"properties":{"group":{"description":"same as pluginId","type":"string"},"name":{"description":"same as grafana uid","type":"string"}},"required":["group","name"],"type":"object"},"TargetSpec":{"additionalProperties":{"additionalProperties":{},"type":"object"},"type":"object"},"TransformationSpec":{"additionalProperties":false,"properties":{"expression":{"type":"string"},"field":{"type":"string"},"mapValue":{"type":"string"},"type":{"type":"string"}},"required":["type","expression","field","mapValue"],"type":"object"},"spec":{"additionalProperties":false,"properties":{"config":{"$ref":"#/components/schemas/ConfigSpec"},"description":{"type":"string"},"label":{"type":"string"},"provisioned":{"type":"boolean"},"source_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"target_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"type":{"$ref":"#/components/schemas/CorrelationType"}},"required":["source_ds_ref","label","config","provisioned","type"],"type":"object"}}`) versionSchemaCorrelationv0alpha1 app.VersionSchema _ = json.Unmarshal(rawSchemaCorrelationv0alpha1, &versionSchemaCorrelationv0alpha1) ) diff --git a/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts b/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts index ff5b1b8f03f..05bbaf69516 100644 --- a/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts +++ 
b/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts @@ -2,7 +2,6 @@ * This file was generated by grafana-app-sdk. DO NOT EDIT. */ import { Spec } from './types.spec.gen'; -import { Status } from './types.status.gen'; export interface Metadata { name: string; @@ -45,5 +44,4 @@ export interface Correlation { apiVersion: string; metadata: Metadata; spec: Spec; - status: Status; } diff --git a/apps/correlations/plugin/src/generated/correlation/v0alpha1/types.status.gen.ts b/apps/correlations/plugin/src/generated/correlation/v0alpha1/types.status.gen.ts deleted file mode 100644 index 01be8df7961..00000000000 --- a/apps/correlations/plugin/src/generated/correlation/v0alpha1/types.status.gen.ts +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -export interface OperatorState { - // lastEvaluation is the ResourceVersion last evaluated - lastEvaluation: string; - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - state: "success" | "in_progress" | "failed"; - // descriptiveState is an optional more descriptive state field which has no requirements on format - descriptiveState?: string; - // details contains any extra information that is operator-specific - details?: Record; -} - -export const defaultOperatorState = (): OperatorState => ({ - lastEvaluation: "", - state: "success", -}); - -export interface Status { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. - operatorStates?: Record; - // additionalFields is reserved for future use - additionalFields?: Record; -} - -export const defaultStatus = (): Status => ({ -}); - diff --git a/apps/dashboard/go.mod b/apps/dashboard/go.mod index 52679f30ed5..01ace00792d 100644 --- a/apps/dashboard/go.mod +++ b/apps/dashboard/go.mod @@ -4,7 +4,7 @@ go 1.24.6 require ( cuelang.org/go v0.11.1 - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 github.com/grafana/grafana-app-sdk v0.45.0 github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana-plugin-sdk-go v0.279.0 @@ -51,7 +51,7 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/otel-profiling-go v0.5.1 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect @@ -134,7 +134,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/apps/dashboard/go.sum b/apps/dashboard/go.sum index 6dc1a3798fd..13b5f650649 100644 --- a/apps/dashboard/go.sum +++ b/apps/dashboard/go.sum @@ -95,10 +95,10 @@ github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25d 
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.45.0 h1:niFqYovxuw9vnUB9qoxEgmupqriG7Gns9ZGwB2uuOyE= @@ -386,8 +386,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/folder/go.mod b/apps/folder/go.mod index 92db7679c96..2772136fc3b 100644 --- a/apps/folder/go.mod +++ b/apps/folder/go.mod @@ -55,7 +55,7 @@ require ( golang.org/x/term v0.35.0 // indirect golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.13.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/client-go v0.34.1 // indirect diff --git a/apps/folder/go.sum b/apps/folder/go.sum index 404d42859b2..37e7a4c1159 100644 --- a/apps/folder/go.sum +++ b/apps/folder/go.sum @@ -152,8 +152,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/iam/Makefile b/apps/iam/Makefile index 71b00aca821..c11c35f7a90 100644 --- a/apps/iam/Makefile +++ b/apps/iam/Makefile @@ -8,6 +8,7 @@ generate: install-app-sdk update-app-sdk ## Run Grafana App SDK code generation --grouping=group \ --defencoding=none \ --noschemasinmanifest \ + --genoperatorstate=false \ --postprocess .PHONY: deps diff --git a/apps/iam/go.mod b/apps/iam/go.mod index 1e77d44fd5d..9339a260a4b 100644 --- a/apps/iam/go.mod +++ b/apps/iam/go.mod @@ -21,7 +21,7 @@ replace github.com/grafana/grafana/pkg/aggregator => ../../pkg/aggregator replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604 require ( - github.com/grafana/grafana v6.1.6+incompatible + github.com/grafana/grafana v0.0.0-00010101000000-000000000000 github.com/grafana/grafana-app-sdk v0.45.0 github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana/apps/folder v0.0.0 @@ -202,8 +202,8 @@ require ( github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect github.com/grafana/dataplane/sdata v0.0.9 // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/grafana-aws-sdk v1.2.0 // indirect @@ -422,7 +422,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect diff --git a/apps/iam/go.sum b/apps/iam/go.sum index ac09e1a53d7..b9188d6bd5f 100644 --- a/apps/iam/go.sum +++ b/apps/iam/go.sum @@ -723,10 +723,10 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d h1:zzEty7HgfXbQ/RiBCJFMqaZiJlqiXuz/Zbc6/H6ksuM= github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d/go.mod h1:T5sitas9VhVj8/S9LeRLy6H75kTBdh/sCCqHo7gaQI8= 
-github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA= github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg= github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= @@ -1994,8 +1994,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go index ae553035b02..371c97fdf4b 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type CoreRoleClient struct { @@ -76,24 +75,6 @@ func (c *CoreRoleClient) Patch(ctx context.Context, identifier resource.Identifi return c.client.Patch(ctx, identifier, req, opts) } -func (c *CoreRoleClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus CoreRoleStatus, opts resource.UpdateOptions) (*CoreRole, error) { - return c.client.Update(ctx, &CoreRole{ - TypeMeta: metav1.TypeMeta{ - Kind: CoreRoleKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *CoreRoleClient) Delete(ctx context.Context, identifier resource.Identifier, opts 
resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go index 4aba32da755..625cced10b9 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go @@ -21,8 +21,6 @@ type CoreRole struct { // Spec is the spec of the CoreRole Spec CoreRoleSpec `json:"spec" yaml:"spec"` - - Status CoreRoleStatus `json:"status" yaml:"status"` } func (o *CoreRole) GetSpec() any { @@ -39,15 +37,11 @@ func (o *CoreRole) SetSpec(spec any) error { } func (o *CoreRole) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *CoreRole) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *CoreRole) GetSubresource(name string) (any, bool) { func (o *CoreRole) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(CoreRoleStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type CoreRoleStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *CoreRole) DeepCopyInto(dst *CoreRole) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *CoreRoleSpec) DeepCopy() *CoreRoleSpec { func (s *CoreRoleSpec) DeepCopyInto(dst *CoreRoleSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of CoreRoleStatus -func (s *CoreRoleStatus) DeepCopy() *CoreRoleStatus { - cpy := &CoreRoleStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies CoreRoleStatus into another CoreRoleStatus object -func (s *CoreRoleStatus) DeepCopyInto(dst *CoreRoleStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go index 71db46f40fa..1a0133a4798 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type GlobalRoleClient struct { @@ -76,24 +75,6 @@ func (c *GlobalRoleClient) Patch(ctx context.Context, identifier resource.Identi return c.client.Patch(ctx, identifier, req, opts) } -func (c *GlobalRoleClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus GlobalRoleStatus, opts resource.UpdateOptions) (*GlobalRole, error) { - return c.client.Update(ctx, &GlobalRole{ - TypeMeta: metav1.TypeMeta{ - Kind: GlobalRoleKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *GlobalRoleClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go 
b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go index 2081adb8507..27165fe70bd 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go @@ -21,8 +21,6 @@ type GlobalRole struct { // Spec is the spec of the GlobalRole Spec GlobalRoleSpec `json:"spec" yaml:"spec"` - - Status GlobalRoleStatus `json:"status" yaml:"status"` } func (o *GlobalRole) GetSpec() any { @@ -39,15 +37,11 @@ func (o *GlobalRole) SetSpec(spec any) error { } func (o *GlobalRole) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *GlobalRole) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *GlobalRole) GetSubresource(name string) (any, bool) { func (o *GlobalRole) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(GlobalRoleStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type GlobalRoleStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *GlobalRole) DeepCopyInto(dst *GlobalRole) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *GlobalRoleSpec) DeepCopy() *GlobalRoleSpec { func (s *GlobalRoleSpec) DeepCopyInto(dst *GlobalRoleSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of GlobalRoleStatus -func (s *GlobalRoleStatus) DeepCopy() *GlobalRoleStatus { - cpy := &GlobalRoleStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies GlobalRoleStatus into another GlobalRoleStatus object -func (s *GlobalRoleStatus) DeepCopyInto(dst *GlobalRoleStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go index 66ea08e4d69..9c06311b938 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type GlobalRoleBindingClient struct { @@ -76,24 +75,6 @@ func (c *GlobalRoleBindingClient) Patch(ctx context.Context, identifier resource return c.client.Patch(ctx, identifier, req, opts) } -func (c *GlobalRoleBindingClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus GlobalRoleBindingStatus, opts resource.UpdateOptions) (*GlobalRoleBinding, error) { - return c.client.Update(ctx, &GlobalRoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: GlobalRoleBindingKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *GlobalRoleBindingClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go 
b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go index 657f1830ff1..3bd4609d25d 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go @@ -21,8 +21,6 @@ type GlobalRoleBinding struct { // Spec is the spec of the GlobalRoleBinding Spec GlobalRoleBindingSpec `json:"spec" yaml:"spec"` - - Status GlobalRoleBindingStatus `json:"status" yaml:"status"` } func (o *GlobalRoleBinding) GetSpec() any { @@ -39,15 +37,11 @@ func (o *GlobalRoleBinding) SetSpec(spec any) error { } func (o *GlobalRoleBinding) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *GlobalRoleBinding) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *GlobalRoleBinding) GetSubresource(name string) (any, bool) { func (o *GlobalRoleBinding) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(GlobalRoleBindingStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type GlobalRoleBindingStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *GlobalRoleBinding) DeepCopyInto(dst *GlobalRoleBinding) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *GlobalRoleBindingSpec) DeepCopy() *GlobalRoleBindingSpec { func (s *GlobalRoleBindingSpec) DeepCopyInto(dst *GlobalRoleBindingSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of GlobalRoleBindingStatus -func (s *GlobalRoleBindingStatus) DeepCopy() *GlobalRoleBindingStatus { - cpy := &GlobalRoleBindingStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies GlobalRoleBindingStatus into another GlobalRoleBindingStatus object -func (s *GlobalRoleBindingStatus) DeepCopyInto(dst *GlobalRoleBindingStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go index 05c70845e26..dab46ffdc05 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ResourcePermissionClient struct { @@ -76,24 +75,6 @@ func (c *ResourcePermissionClient) Patch(ctx context.Context, identifier resourc return c.client.Patch(ctx, identifier, req, opts) } -func (c *ResourcePermissionClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus ResourcePermissionStatus, opts resource.UpdateOptions) (*ResourcePermission, error) { - return c.client.Update(ctx, &ResourcePermission{ - TypeMeta: metav1.TypeMeta{ - Kind: ResourcePermissionKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *ResourcePermissionClient) Delete(ctx 
context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go index 4fcebde9e01..996beb7e002 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go @@ -21,8 +21,6 @@ type ResourcePermission struct { // Spec is the spec of the ResourcePermission Spec ResourcePermissionSpec `json:"spec" yaml:"spec"` - - Status ResourcePermissionStatus `json:"status" yaml:"status"` } func (o *ResourcePermission) GetSpec() any { @@ -39,15 +37,11 @@ func (o *ResourcePermission) SetSpec(spec any) error { } func (o *ResourcePermission) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *ResourcePermission) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *ResourcePermission) GetSubresource(name string) (any, bool) { func (o *ResourcePermission) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(ResourcePermissionStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type ResourcePermissionStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *ResourcePermission) DeepCopyInto(dst *ResourcePermission) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *ResourcePermissionSpec) DeepCopy() *ResourcePermissionSpec { func (s *ResourcePermissionSpec) DeepCopyInto(dst *ResourcePermissionSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of ResourcePermissionStatus -func (s *ResourcePermissionStatus) DeepCopy() *ResourcePermissionStatus { - cpy := &ResourcePermissionStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies ResourcePermissionStatus into another ResourcePermissionStatus object -func (s *ResourcePermissionStatus) DeepCopyInto(dst *ResourcePermissionStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go index e61d3fc83dd..b53bfdd9e6d 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type RoleClient struct { @@ -76,24 +75,6 @@ func (c *RoleClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *RoleClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus RoleStatus, opts resource.UpdateOptions) (*Role, error) { - return c.client.Update(ctx, &Role{ - TypeMeta: metav1.TypeMeta{ - Kind: RoleKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - 
ResourceVersion: opts.ResourceVersion, - }) -} - func (c *RoleClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go index 9256a618c9f..20bb587157e 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go @@ -21,8 +21,6 @@ type Role struct { // Spec is the spec of the Role Spec RoleSpec `json:"spec" yaml:"spec"` - - Status RoleStatus `json:"status" yaml:"status"` } func (o *Role) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Role) SetSpec(spec any) error { } func (o *Role) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Role) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Role) GetSubresource(name string) (any, bool) { func (o *Role) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(RoleStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type RoleStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Role) DeepCopyInto(dst *Role) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *RoleSpec) DeepCopy() *RoleSpec { func (s *RoleSpec) DeepCopyInto(dst *RoleSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of RoleStatus -func (s *RoleStatus) DeepCopy() *RoleStatus { - cpy := &RoleStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies RoleStatus into another RoleStatus object -func (s *RoleStatus) DeepCopyInto(dst *RoleStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go index 13e78c70adf..96807b1b795 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type RoleBindingClient struct { @@ -76,24 +75,6 @@ func (c *RoleBindingClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *RoleBindingClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus RoleBindingStatus, opts resource.UpdateOptions) (*RoleBinding, error) { - return c.client.Update(ctx, &RoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: RoleBindingKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *RoleBindingClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git 
a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go index bec75890d89..dfd7741e05a 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go @@ -21,8 +21,6 @@ type RoleBinding struct { // Spec is the spec of the RoleBinding Spec RoleBindingSpec `json:"spec" yaml:"spec"` - - Status RoleBindingStatus `json:"status" yaml:"status"` } func (o *RoleBinding) GetSpec() any { @@ -39,15 +37,11 @@ func (o *RoleBinding) SetSpec(spec any) error { } func (o *RoleBinding) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *RoleBinding) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *RoleBinding) GetSubresource(name string) (any, bool) { func (o *RoleBinding) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(RoleBindingStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type RoleBindingStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *RoleBinding) DeepCopyInto(dst *RoleBinding) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *RoleBindingSpec) DeepCopy() *RoleBindingSpec { func (s *RoleBindingSpec) DeepCopyInto(dst *RoleBindingSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of RoleBindingStatus -func (s *RoleBindingStatus) DeepCopy() *RoleBindingStatus { - cpy := &RoleBindingStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies RoleBindingStatus into another RoleBindingStatus object -func (s *RoleBindingStatus) DeepCopyInto(dst *RoleBindingStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go index 3b922e8b313..0ce1616daa3 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ServiceAccountClient struct { @@ -76,24 +75,6 @@ func (c *ServiceAccountClient) Patch(ctx context.Context, identifier resource.Id return c.client.Patch(ctx, identifier, req, opts) } -func (c *ServiceAccountClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus ServiceAccountStatus, opts resource.UpdateOptions) (*ServiceAccount, error) { - return c.client.Update(ctx, &ServiceAccount{ - TypeMeta: metav1.TypeMeta{ - Kind: ServiceAccountKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *ServiceAccountClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git 
a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go index 081e244dd6e..fe3cd7f3609 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go @@ -21,8 +21,6 @@ type ServiceAccount struct { // Spec is the spec of the ServiceAccount Spec ServiceAccountSpec `json:"spec" yaml:"spec"` - - Status ServiceAccountStatus `json:"status" yaml:"status"` } func (o *ServiceAccount) GetSpec() any { @@ -39,15 +37,11 @@ func (o *ServiceAccount) SetSpec(spec any) error { } func (o *ServiceAccount) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *ServiceAccount) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *ServiceAccount) GetSubresource(name string) (any, bool) { func (o *ServiceAccount) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(ServiceAccountStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type ServiceAccountStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *ServiceAccount) DeepCopyInto(dst *ServiceAccount) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *ServiceAccountSpec) DeepCopy() *ServiceAccountSpec { func (s *ServiceAccountSpec) DeepCopyInto(dst *ServiceAccountSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of ServiceAccountStatus -func (s *ServiceAccountStatus) DeepCopy() *ServiceAccountStatus { - cpy := &ServiceAccountStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies ServiceAccountStatus into another ServiceAccountStatus object -func (s *ServiceAccountStatus) DeepCopyInto(dst *ServiceAccountStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go index c166c6c2881..807c68dc0f6 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type TeamClient struct { @@ -76,24 +75,6 @@ func (c *TeamClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *TeamClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus TeamStatus, opts resource.UpdateOptions) (*Team, error) { - return c.client.Update(ctx, &Team{ - TypeMeta: metav1.TypeMeta{ - Kind: TeamKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *TeamClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git 
a/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go index bfc949acfd1..4030bebb9d1 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go @@ -21,8 +21,6 @@ type Team struct { // Spec is the spec of the Team Spec TeamSpec `json:"spec" yaml:"spec"` - - Status TeamStatus `json:"status" yaml:"status"` } func (o *Team) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Team) SetSpec(spec any) error { } func (o *Team) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Team) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Team) GetSubresource(name string) (any, bool) { func (o *Team) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(TeamStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type TeamStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Team) DeepCopyInto(dst *Team) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *TeamSpec) DeepCopy() *TeamSpec { func (s *TeamSpec) DeepCopyInto(dst *TeamSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of TeamStatus -func (s *TeamStatus) DeepCopy() *TeamStatus { - cpy := &TeamStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies TeamStatus into another TeamStatus object -func (s *TeamStatus) DeepCopyInto(dst *TeamStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go index 39729e1ece1..faaf86c1b00 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type TeamBindingClient struct { @@ -76,24 +75,6 @@ func (c *TeamBindingClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *TeamBindingClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus TeamBindingStatus, opts resource.UpdateOptions) (*TeamBinding, error) { - return c.client.Update(ctx, &TeamBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: TeamBindingKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *TeamBindingClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go index 6c9f6f407e3..a958c55f5e7 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go +++ 
b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go @@ -21,8 +21,6 @@ type TeamBinding struct { // Spec is the spec of the TeamBinding Spec TeamBindingSpec `json:"spec" yaml:"spec"` - - Status TeamBindingStatus `json:"status" yaml:"status"` } func (o *TeamBinding) GetSpec() any { @@ -39,15 +37,11 @@ func (o *TeamBinding) SetSpec(spec any) error { } func (o *TeamBinding) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *TeamBinding) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *TeamBinding) GetSubresource(name string) (any, bool) { func (o *TeamBinding) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(TeamBindingStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type TeamBindingStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *TeamBinding) DeepCopyInto(dst *TeamBinding) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *TeamBindingSpec) DeepCopy() *TeamBindingSpec { func (s *TeamBindingSpec) DeepCopyInto(dst *TeamBindingSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of TeamBindingStatus -func (s *TeamBindingStatus) DeepCopy() *TeamBindingStatus { - cpy := &TeamBindingStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies TeamBindingStatus into another TeamBindingStatus object -func (s *TeamBindingStatus) DeepCopyInto(dst *TeamBindingStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go index bd7af9b3361..665df84327e 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type UserClient struct { @@ -76,24 +75,6 @@ func (c *UserClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *UserClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus UserStatus, opts resource.UpdateOptions) (*User, error) { - return c.client.Update(ctx, &User{ - TypeMeta: metav1.TypeMeta{ - Kind: UserKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *UserClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go index ce2defa33a8..373112a1d87 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go @@ -21,8 +21,6 @@ type User struct { // Spec is the spec of the User Spec UserSpec `json:"spec" 
yaml:"spec"` - - Status UserStatus `json:"status" yaml:"status"` } func (o *User) GetSpec() any { @@ -39,15 +37,11 @@ func (o *User) SetSpec(spec any) error { } func (o *User) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *User) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *User) GetSubresource(name string) (any, bool) { func (o *User) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(UserStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type UserStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *User) DeepCopyInto(dst *User) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *UserSpec) DeepCopy() *UserSpec { func (s *UserSpec) DeepCopyInto(dst *UserSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of UserStatus -func (s *UserStatus) DeepCopy() *UserStatus { - cpy := &UserStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies UserStatus into another UserStatus object -func (s *UserStatus) DeepCopyInto(dst *UserStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go index fb6892fa09d..cb2e31f9144 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go @@ -109,18 +109,12 @@ func schema_pkg_apis_iam_v0alpha1_CoreRole(ref common.ReferenceCallback) common. 
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -387,18 +381,12 @@ func schema_pkg_apis_iam_v0alpha1_GlobalRole(ref common.ReferenceCallback) commo Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -435,18 +423,12 @@ func schema_pkg_apis_iam_v0alpha1_GlobalRoleBinding(ref common.ReferenceCallback Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -948,18 +930,12 @@ func schema_pkg_apis_iam_v0alpha1_ResourcePermission(ref common.ReferenceCallbac Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -1247,18 +1223,12 @@ func schema_pkg_apis_iam_v0alpha1_Role(ref common.ReferenceCallback) common.Open Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleSpec"), }, }, - 
"status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -1295,18 +1265,12 @@ func schema_pkg_apis_iam_v0alpha1_RoleBinding(ref common.ReferenceCallback) comm Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -1808,18 +1772,12 @@ func schema_pkg_apis_iam_v0alpha1_ServiceAccount(ref common.ReferenceCallback) c Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -2040,18 +1998,12 @@ func schema_pkg_apis_iam_v0alpha1_Team(ref common.ReferenceCallback) common.Open Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -2088,18 +2040,12 @@ func schema_pkg_apis_iam_v0alpha1_TeamBinding(ref common.ReferenceCallback) comm Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingStatus"), - }, - }, 
}, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -2547,18 +2493,12 @@ func schema_pkg_apis_iam_v0alpha1_User(ref common.ReferenceCallback) common.Open Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } diff --git a/apps/investigations/go.mod b/apps/investigations/go.mod index 86bec16425b..6cf0836786d 100644 --- a/apps/investigations/go.mod +++ b/apps/investigations/go.mod @@ -78,7 +78,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/investigations/go.sum b/apps/investigations/go.sum index b34bb79ec13..396ebebc9de 100644 --- a/apps/investigations/go.sum +++ b/apps/investigations/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/playlist/go.mod b/apps/playlist/go.mod index 2284cb1cb03..08cebc839ab 100644 --- a/apps/playlist/go.mod +++ b/apps/playlist/go.mod @@ -79,7 +79,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - 
google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/playlist/go.sum b/apps/playlist/go.sum index b34bb79ec13..396ebebc9de 100644 --- a/apps/playlist/go.sum +++ b/apps/playlist/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/plugins/go.mod b/apps/plugins/go.mod index 05f0880b356..1ff6e32794c 100644 --- a/apps/plugins/go.mod +++ b/apps/plugins/go.mod @@ -3,7 +3,7 @@ module github.com/grafana/grafana/apps/plugins go 1.24.4 require ( - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 github.com/grafana/grafana-app-sdk v0.45.0 github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250428110029-a8ea72012bde k8s.io/apimachinery v0.34.1 @@ -35,7 +35,7 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/grafana-app-sdk/logging v0.45.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect @@ -86,7 +86,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/plugins/go.sum b/apps/plugins/go.sum index 19b005ae46f..1b98503c4a5 100644 --- a/apps/plugins/go.sum +++ b/apps/plugins/go.sum @@ -50,10 +50,10 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= 
-github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.45.0 h1:niFqYovxuw9vnUB9qoxEgmupqriG7Gns9ZGwB2uuOyE= @@ -219,8 +219,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/preferences/Makefile b/apps/preferences/Makefile index 230bfd4149a..bc8d6d30cb5 100644 --- a/apps/preferences/Makefile +++ b/apps/preferences/Makefile @@ -6,4 +6,5 @@ generate: install-app-sdk update-app-sdk --source=./kinds/ \ --gogenpath=./pkg/apis \ --grouping=group \ + --genoperatorstate=false \ --defencoding=none \ No newline at end of file diff --git a/apps/preferences/go.mod b/apps/preferences/go.mod index 3c369fbbfc0..3024a0128df 100644 --- a/apps/preferences/go.mod +++ b/apps/preferences/go.mod @@ -55,7 +55,7 @@ require ( golang.org/x/term v0.35.0 // indirect golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.13.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/client-go v0.34.1 // indirect diff --git a/apps/preferences/go.sum b/apps/preferences/go.sum index 392dfb7d257..8bcea32a115 100644 --- a/apps/preferences/go.sum +++ b/apps/preferences/go.sum @@ -152,8 +152,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go index 380bafb16bf..66e661e574d 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type PreferencesClient struct { @@ -76,24 +75,6 @@ func (c *PreferencesClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *PreferencesClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus PreferencesStatus, opts resource.UpdateOptions) (*Preferences, error) { - return c.client.Update(ctx, &Preferences{ - TypeMeta: metav1.TypeMeta{ - Kind: PreferencesKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *PreferencesClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go index 9f1857a5f7b..81a4b4351ab 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go @@ -21,8 +21,6 @@ type Preferences struct { // Spec is the spec of the Preferences Spec PreferencesSpec `json:"spec" yaml:"spec"` - - Status PreferencesStatus `json:"status" yaml:"status"` } func (o *Preferences) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Preferences) SetSpec(spec any) error { } func (o *Preferences) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Preferences) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Preferences) GetSubresource(name string) (any, bool) { func (o *Preferences) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(PreferencesStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type PreferencesStatus", value) - } - o.Status = cast - 
return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Preferences) DeepCopyInto(dst *Preferences) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *PreferencesSpec) DeepCopy() *PreferencesSpec { func (s *PreferencesSpec) DeepCopyInto(dst *PreferencesSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of PreferencesStatus -func (s *PreferencesStatus) DeepCopy() *PreferencesStatus { - cpy := &PreferencesStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies PreferencesStatus into another PreferencesStatus object -func (s *PreferencesStatus) DeepCopyInto(dst *PreferencesStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_status_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_status_gen.go deleted file mode 100644 index 420dd054d3d..00000000000 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_status_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -package v1alpha1 - -// +k8s:openapi-gen=true -type PreferencesstatusOperatorState struct { - // lastEvaluation is the ResourceVersion last evaluated - LastEvaluation string `json:"lastEvaluation"` - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - State PreferencesStatusOperatorStateState `json:"state"` - // descriptiveState is an optional more descriptive state field which has no requirements on format - DescriptiveState *string `json:"descriptiveState,omitempty"` - // details contains any extra information that is operator-specific - Details map[string]interface{} `json:"details,omitempty"` -} - -// NewPreferencesstatusOperatorState creates a new PreferencesstatusOperatorState object. -func NewPreferencesstatusOperatorState() *PreferencesstatusOperatorState { - return &PreferencesstatusOperatorState{} -} - -// +k8s:openapi-gen=true -type PreferencesStatus struct { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. - OperatorStates map[string]PreferencesstatusOperatorState `json:"operatorStates,omitempty"` - // additionalFields is reserved for future use - AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"` -} - -// NewPreferencesStatus creates a new PreferencesStatus object. 
-func NewPreferencesStatus() *PreferencesStatus { - return &PreferencesStatus{} -} - -// +k8s:openapi-gen=true -type PreferencesStatusOperatorStateState string - -const ( - PreferencesStatusOperatorStateStateSuccess PreferencesStatusOperatorStateState = "success" - PreferencesStatusOperatorStateStateInProgress PreferencesStatusOperatorStateState = "in_progress" - PreferencesStatusOperatorStateStateFailed PreferencesStatusOperatorStateState = "failed" -) diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go index 3a607012db3..9f2c56ea59b 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type StarsClient struct { @@ -76,24 +75,6 @@ func (c *StarsClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *StarsClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus StarsStatus, opts resource.UpdateOptions) (*Stars, error) { - return c.client.Update(ctx, &Stars{ - TypeMeta: metav1.TypeMeta{ - Kind: StarsKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *StarsClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go index d6836cea565..8381844dceb 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go @@ -21,8 +21,6 @@ type Stars struct { // Spec is the spec of the Stars Spec StarsSpec `json:"spec" yaml:"spec"` - - Status StarsStatus `json:"status" yaml:"status"` } func (o *Stars) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Stars) SetSpec(spec any) error { } func (o *Stars) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Stars) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Stars) GetSubresource(name string) (any, bool) { func (o *Stars) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(StarsStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type StarsStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Stars) DeepCopyInto(dst *Stars) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *StarsSpec) DeepCopy() *StarsSpec { func (s *StarsSpec) DeepCopyInto(dst *StarsSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of 
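For orientation while reading the two deleted *_status_gen.go files (the Stars one continues just below): this is the operator-state shape that --genoperatorstate=false now suppresses, condensed from the removed generated code rather than newly designed:

package removedshape // condensed illustration of the deleted generated types; not part of the change

// operatorState mirrors PreferencesstatusOperatorState / StarsstatusOperatorState
// from the deleted files.
type operatorState struct {
    LastEvaluation   string                 `json:"lastEvaluation"`
    State            string                 `json:"state"` // "success", "in_progress" or "failed"
    DescriptiveState *string                `json:"descriptiveState,omitempty"`
    Details          map[string]interface{} `json:"details,omitempty"`
}

// removedStatus mirrors PreferencesStatus / StarsStatus from the deleted files.
type removedStatus struct {
    OperatorStates   map[string]operatorState `json:"operatorStates,omitempty"`
    AdditionalFields map[string]interface{}   `json:"additionalFields,omitempty"`
}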
StarsStatus -func (s *StarsStatus) DeepCopy() *StarsStatus { - cpy := &StarsStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies StarsStatus into another StarsStatus object -func (s *StarsStatus) DeepCopyInto(dst *StarsStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_status_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_status_gen.go deleted file mode 100644 index c8c7c2b07f2..00000000000 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_status_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -package v1alpha1 - -// +k8s:openapi-gen=true -type StarsstatusOperatorState struct { - // lastEvaluation is the ResourceVersion last evaluated - LastEvaluation string `json:"lastEvaluation"` - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - State StarsStatusOperatorStateState `json:"state"` - // descriptiveState is an optional more descriptive state field which has no requirements on format - DescriptiveState *string `json:"descriptiveState,omitempty"` - // details contains any extra information that is operator-specific - Details map[string]interface{} `json:"details,omitempty"` -} - -// NewStarsstatusOperatorState creates a new StarsstatusOperatorState object. -func NewStarsstatusOperatorState() *StarsstatusOperatorState { - return &StarsstatusOperatorState{} -} - -// +k8s:openapi-gen=true -type StarsStatus struct { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. - OperatorStates map[string]StarsstatusOperatorState `json:"operatorStates,omitempty"` - // additionalFields is reserved for future use - AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"` -} - -// NewStarsStatus creates a new StarsStatus object. 
-func NewStarsStatus() *StarsStatus { - return &StarsStatus{} -} - -// +k8s:openapi-gen=true -type StarsStatusOperatorStateState string - -const ( - StarsStatusOperatorStateStateSuccess StarsStatusOperatorStateState = "success" - StarsStatusOperatorStateStateInProgress StarsStatusOperatorStateState = "in_progress" - StarsStatusOperatorStateStateFailed StarsStatusOperatorStateState = "failed" -) diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go b/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go index 7b9014a57bb..c8700a3dfa5 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go @@ -20,14 +20,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesNavbarPreference": schema_pkg_apis_preferences_v1alpha1_PreferencesNavbarPreference(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesQueryHistoryPreference": schema_pkg_apis_preferences_v1alpha1_PreferencesQueryHistoryPreference(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec": schema_pkg_apis_preferences_v1alpha1_PreferencesSpec(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesStatus": schema_pkg_apis_preferences_v1alpha1_PreferencesStatus(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesstatusOperatorState": schema_pkg_apis_preferences_v1alpha1_PreferencesstatusOperatorState(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.Stars": schema_pkg_apis_preferences_v1alpha1_Stars(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsList": schema_pkg_apis_preferences_v1alpha1_StarsList(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsResource": schema_pkg_apis_preferences_v1alpha1_StarsResource(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec": schema_pkg_apis_preferences_v1alpha1_StarsSpec(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsStatus": schema_pkg_apis_preferences_v1alpha1_StarsStatus(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsstatusOperatorState": schema_pkg_apis_preferences_v1alpha1_StarsstatusOperatorState(ref), } } @@ -64,18 +60,12 @@ func schema_pkg_apis_preferences_v1alpha1_Preferences(ref common.ReferenceCallba Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec", "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -277,101 +267,6 @@ func 
schema_pkg_apis_preferences_v1alpha1_PreferencesSpec(ref common.ReferenceCa } } -func schema_pkg_apis_preferences_v1alpha1_PreferencesStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "operatorStates": { - SchemaProps: spec.SchemaProps{ - Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesstatusOperatorState"), - }, - }, - }, - }, - }, - "additionalFields": { - SchemaProps: spec.SchemaProps{ - Description: "additionalFields is reserved for future use", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesstatusOperatorState"}, - } -} - -func schema_pkg_apis_preferences_v1alpha1_PreferencesstatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "lastEvaluation": { - SchemaProps: spec.SchemaProps{ - Description: "lastEvaluation is the ResourceVersion last evaluated", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "state": { - SchemaProps: spec.SchemaProps{ - Description: "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "descriptiveState": { - SchemaProps: spec.SchemaProps{ - Description: "descriptiveState is an optional more descriptive state field which has no requirements on format", - Type: []string{"string"}, - Format: "", - }, - }, - "details": { - SchemaProps: spec.SchemaProps{ - Description: "details contains any extra information that is operator-specific", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"lastEvaluation", "state"}, - }, - }, - } -} - func schema_pkg_apis_preferences_v1alpha1_Stars(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -405,18 +300,12 @@ func schema_pkg_apis_preferences_v1alpha1_Stars(ref common.ReferenceCallback) co Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec", "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -542,98 +431,3 @@ func schema_pkg_apis_preferences_v1alpha1_StarsSpec(ref common.ReferenceCallback "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsResource"}, } } - -func schema_pkg_apis_preferences_v1alpha1_StarsStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "operatorStates": { - SchemaProps: spec.SchemaProps{ - Description: "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsstatusOperatorState"), - }, - }, - }, - }, - }, - "additionalFields": { - SchemaProps: spec.SchemaProps{ - Description: "additionalFields is reserved for future use", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsstatusOperatorState"}, - } -} - -func schema_pkg_apis_preferences_v1alpha1_StarsstatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "lastEvaluation": { - SchemaProps: spec.SchemaProps{ - Description: "lastEvaluation is the ResourceVersion last evaluated", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "state": { - SchemaProps: spec.SchemaProps{ - Description: "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "descriptiveState": { - SchemaProps: spec.SchemaProps{ - Description: "descriptiveState is an optional more descriptive state field which has no requirements on format", - Type: []string{"string"}, - Format: "", - }, - }, - "details": { - SchemaProps: spec.SchemaProps{ - Description: "details contains any extra information that is operator-specific", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"lastEvaluation", "state"}, - }, - }, - } -} diff --git a/apps/preferences/pkg/apis/preferences_manifest.go b/apps/preferences/pkg/apis/preferences_manifest.go index b0a3468058a..3e9e865df6d 100644 --- a/apps/preferences/pkg/apis/preferences_manifest.go +++ b/apps/preferences/pkg/apis/preferences_manifest.go @@ -19,10 +19,10 @@ import ( ) var ( - rawSchemaPreferencesv1alpha1 = []byte(`{"CookiePreferences":{"additionalProperties":false,"properties":{"analytics":{"additionalProperties":{},"type":"object"},"functional":{"additionalProperties":{},"type":"object"},"performance":{"additionalProperties":{},"type":"object"}},"type":"object"},"NavbarPreference":{"additionalProperties":false,"properties":{"bookmarkUrls":{"items":{"type":"string"},"type":"array"}},"required":["bookmarkUrls"],"type":"object"},"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which has no requirements on format","type":"string"},"details":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last evaluated","type":"string"},"state":{"description":"state 
describes the state of the lastEvaluation.\nIt is limited to three possible states for machine evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"Preferences":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"QueryHistoryPreference":{"additionalProperties":false,"properties":{"homeTab":{"description":"one of: '' | 'query' | 'starred';","type":"string"}},"type":"object"},"spec":{"additionalProperties":false,"properties":{"cookiePreferences":{"$ref":"#/components/schemas/CookiePreferences","description":"Cookie preferences"},"homeDashboardUID":{"description":"UID for the home dashboard","type":"string"},"language":{"description":"Selected language (beta)","type":"string"},"navbar":{"$ref":"#/components/schemas/NavbarPreference","description":"Navigation preferences"},"queryHistory":{"$ref":"#/components/schemas/QueryHistoryPreference","description":"Explore query history preferences"},"regionalFormat":{"description":"Selected locale (beta)","type":"string"},"theme":{"description":"light, dark, empty is default","type":"string"},"timezone":{"description":"The timezone selection\nTODO: this should use the timezone defined in common","type":"string"},"weekStart":{"description":"day of the week (sunday, monday, etc)","type":"string"}},"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"additionalFields is reserved for future use","type":"object"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.","type":"object"}},"type":"object"}}`) + rawSchemaPreferencesv1alpha1 = []byte(`{"CookiePreferences":{"additionalProperties":false,"properties":{"analytics":{"additionalProperties":{},"type":"object"},"functional":{"additionalProperties":{},"type":"object"},"performance":{"additionalProperties":{},"type":"object"}},"type":"object"},"NavbarPreference":{"additionalProperties":false,"properties":{"bookmarkUrls":{"items":{"type":"string"},"type":"array"}},"required":["bookmarkUrls"],"type":"object"},"Preferences":{"properties":{"spec":{"$ref":"#/components/schemas/spec"}},"required":["spec"]},"QueryHistoryPreference":{"additionalProperties":false,"properties":{"homeTab":{"description":"one of: '' | 'query' | 'starred';","type":"string"}},"type":"object"},"spec":{"additionalProperties":false,"properties":{"cookiePreferences":{"$ref":"#/components/schemas/CookiePreferences","description":"Cookie preferences"},"homeDashboardUID":{"description":"UID for the home dashboard","type":"string"},"language":{"description":"Selected language (beta)","type":"string"},"navbar":{"$ref":"#/components/schemas/NavbarPreference","description":"Navigation preferences"},"queryHistory":{"$ref":"#/components/schemas/QueryHistoryPreference","description":"Explore query history preferences"},"regionalFormat":{"description":"Selected locale (beta)","type":"string"},"theme":{"description":"light, dark, empty is default","type":"string"},"timezone":{"description":"The timezone selection\nTODO: this should use the timezone defined in common","type":"string"},"weekStart":{"description":"day of the week (sunday, monday, 
etc)","type":"string"}},"type":"object"}}`) versionSchemaPreferencesv1alpha1 app.VersionSchema _ = json.Unmarshal(rawSchemaPreferencesv1alpha1, &versionSchemaPreferencesv1alpha1) - rawSchemaStarsv1alpha1 = []byte(`{"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which has no requirements on format","type":"string"},"details":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last evaluated","type":"string"},"state":{"description":"state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"Resource":{"additionalProperties":false,"properties":{"group":{"type":"string"},"kind":{"type":"string"},"names":{"description":"The set of resources\n+listType=set","items":{"type":"string"},"type":"array"}},"required":["group","kind","names"],"type":"object"},"Stars":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"spec":{"additionalProperties":false,"properties":{"resource":{"items":{"$ref":"#/components/schemas/Resource"},"type":"array"}},"required":["resource"],"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"additionalFields is reserved for future use","type":"object"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.","type":"object"}},"type":"object"}}`) + rawSchemaStarsv1alpha1 = []byte(`{"Resource":{"additionalProperties":false,"properties":{"group":{"type":"string"},"kind":{"type":"string"},"names":{"description":"The set of resources\n+listType=set","items":{"type":"string"},"type":"array"}},"required":["group","kind","names"],"type":"object"},"Stars":{"properties":{"spec":{"$ref":"#/components/schemas/spec"}},"required":["spec"]},"spec":{"additionalProperties":false,"properties":{"resource":{"items":{"$ref":"#/components/schemas/Resource"},"type":"array"}},"required":["resource"],"type":"object"}}`) versionSchemaStarsv1alpha1 app.VersionSchema _ = json.Unmarshal(rawSchemaStarsv1alpha1, &versionSchemaStarsv1alpha1) ) diff --git a/apps/provisioning/Makefile b/apps/provisioning/Makefile index c3d7e748603..e9a8b1fbdf9 100644 --- a/apps/provisioning/Makefile +++ b/apps/provisioning/Makefile @@ -1,8 +1,13 @@ include ../sdk.mk -.PHONY: generate +.PHONY: generate # Run Grafana App SDK code generation generate: install-app-sdk update-app-sdk - @$(APP_SDK_BIN) generate -g ./kinds --grouping=group --postprocess --defencoding=none --useoldmanifestkinds + @$(APP_SDK_BIN) generate \ + --source=./kinds/ \ + --gogenpath=./pkg/apis \ + --grouping=group \ + --genoperatorstate=false \ + --defencoding=none .PHONY: build build: generate diff --git a/apps/provisioning/go.mod b/apps/provisioning/go.mod index 09f7cc24835..61137e897e5 100644 --- a/apps/provisioning/go.mod +++ b/apps/provisioning/go.mod @@ -5,7 +5,7 @@ go 1.24.6 require ( 
github.com/google/go-github/v70 v70.0.0 github.com/google/uuid v1.6.0 - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana/apps/secret v0.0.0-20250902093454-b56b7add012f github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250804150913-990f1c69ecc2 @@ -40,7 +40,7 @@ require ( github.com/google/go-github/v64 v64.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // indirect + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/grafana-app-sdk v0.45.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -75,7 +75,7 @@ require ( golang.org/x/time v0.13.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/apps/provisioning/go.sum b/apps/provisioning/go.sum index ef6ed2203e6..74542a4123f 100644 --- a/apps/provisioning/go.sum +++ b/apps/provisioning/go.sum @@ -52,10 +52,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.45.0 h1:niFqYovxuw9vnUB9qoxEgmupqriG7Gns9ZGwB2uuOyE= @@ -224,8 +224,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 
h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/provisioning/pkg/repository/github/client.go b/apps/provisioning/pkg/repository/github/client.go index 5b21a5a4f1e..00f1ca0946b 100644 --- a/apps/provisioning/pkg/repository/github/client.go +++ b/apps/provisioning/pkg/repository/github/client.go @@ -13,6 +13,7 @@ import ( // API errors that we need to convey after parsing real GH errors (or faking them). var ( ErrResourceNotFound = errors.New("the resource does not exist") + ErrUnauthorized = errors.New("unauthorized") //lint:ignore ST1005 this is not punctuation ErrServiceUnavailable = apierrors.NewServiceUnavailable("github is unavailable") ErrTooManyItems = errors.New("maximum number of items exceeded") diff --git a/apps/provisioning/pkg/repository/github/impl.go b/apps/provisioning/pkg/repository/github/impl.go index c9f5468bc8d..c80f1e7a7f9 100644 --- a/apps/provisioning/pkg/repository/github/impl.go +++ b/apps/provisioning/pkg/repository/github/impl.go @@ -199,6 +199,9 @@ func (r *githubClient) DeleteWebhook(ctx context.Context, owner, repository stri if ghErr.Response.StatusCode == http.StatusNotFound { return ErrResourceNotFound } + if ghErr.Response.StatusCode == http.StatusUnauthorized || ghErr.Response.StatusCode == http.StatusForbidden { + return ErrUnauthorized + } return err } diff --git a/apps/provisioning/pkg/repository/github/impl_test.go b/apps/provisioning/pkg/repository/github/impl_test.go index fedbc2a9850..57f954e906e 100644 --- a/apps/provisioning/pkg/repository/github/impl_test.go +++ b/apps/provisioning/pkg/repository/github/impl_test.go @@ -975,6 +975,27 @@ func TestGithubClient_DeleteWebhook(t *testing.T) { webhookID: 789, wantErr: ErrServiceUnavailable, }, + { + name: "unauthorized to delete the webhook", + mockHandler: mockhub.NewMockedHTTPClient( + mockhub.WithRequestMatchHandler( + mockhub.DeleteReposHooksByOwnerByRepoByHookId, + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + require.NoError(t, json.NewEncoder(w).Encode(github.ErrorResponse{ + Response: &http.Response{ + StatusCode: http.StatusUnauthorized, + }, + Message: "401 bad credentials", + })) + }), + ), + ), + owner: "test-owner", + repository: "test-repo", + webhookID: 789, + wantErr: ErrUnauthorized, + }, { name: "other error", mockHandler: mockhub.NewMockedHTTPClient( diff --git a/apps/provisioning/pkg/repository/github/webhook.go b/apps/provisioning/pkg/repository/github/webhook.go index 3c37b00cba7..c2e13681f33 100644 --- a/apps/provisioning/pkg/repository/github/webhook.go +++ b/apps/provisioning/pkg/repository/github/webhook.go @@ -274,11 +274,15 @@ func (r *githubWebhookRepository) deleteWebhook(ctx context.Context) error { id := r.config.Status.Webhook.ID err := r.gh.DeleteWebhook(ctx, r.owner, r.repo, id) - if err != nil && !errors.Is(err, ErrResourceNotFound) { + if err != nil && !errors.Is(err, ErrResourceNotFound) && !errors.Is(err, 
ErrUnauthorized) { return fmt.Errorf("delete webhook: %w", err) } if errors.Is(err, ErrResourceNotFound) { - logger.Info("webhook does not exist", "url", r.config.Status.Webhook.URL, "id", id) + logger.Warn("webhook no longer exists", "url", r.config.Status.Webhook.URL, "id", id) + return nil + } + if errors.Is(err, ErrUnauthorized) { + logger.Warn("webhook deletion failed. no longer authorized to delete this webhook", "url", r.config.Status.Webhook.URL, "id", id) return nil } diff --git a/apps/provisioning/pkg/repository/github/webhook_test.go b/apps/provisioning/pkg/repository/github/webhook_test.go index 5021623fe90..49faf733378 100644 --- a/apps/provisioning/pkg/repository/github/webhook_test.go +++ b/apps/provisioning/pkg/repository/github/webhook_test.go @@ -1565,6 +1565,32 @@ func TestGitHubRepository_OnDelete(t *testing.T) { // We don't return an error if the webhook is already gone expectedError: nil, }, + { + name: "unauthorized to delete the webhook", + setupMock: func(m *MockClient) { + m.On("DeleteWebhook", mock.Anything, "grafana", "grafana", int64(123)). + Return(ErrUnauthorized) + }, + config: &provisioning.Repository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-repo", + }, + Spec: provisioning.RepositorySpec{ + GitHub: &provisioning.GitHubRepositoryConfig{ + Branch: "main", + }, + }, + Status: provisioning.RepositoryStatus{ + Webhook: &provisioning.WebhookStatus{ + ID: 123, + URL: "https://example.com/webhook", + }, + }, + }, + webhookURL: "https://example.com/webhook", + // We don't return an error if access to the webhook is revoked + expectedError: nil, + }, { name: "no webhook URL provided", setupMock: func(_ *MockClient) {}, diff --git a/apps/provisioning/pkg/repository/test.go b/apps/provisioning/pkg/repository/test.go index 4c81013c477..251c8a9f06c 100644 --- a/apps/provisioning/pkg/repository/test.go +++ b/apps/provisioning/pkg/repository/test.go @@ -75,11 +75,6 @@ func ValidateRepository(repo Repository) field.ErrorList { "The target type is required when sync is enabled")) } - if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < 10 { - list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"), - cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", 10))) - } - // Reserved names (for now) reserved := []string{"classic", "sql", "SQL", "plugins", "legacy", "new", "job", "github", "s3", "gcs", "file", "new", "create", "update", "delete"} if slices.Contains(reserved, cfg.Name) { diff --git a/apps/provisioning/pkg/repository/test_test.go b/apps/provisioning/pkg/repository/test_test.go index 752f28bb979..4ebdd619d77 100644 --- a/apps/provisioning/pkg/repository/test_test.go +++ b/apps/provisioning/pkg/repository/test_test.go @@ -74,28 +74,6 @@ func TestValidateRepository(t *testing.T) { require.Contains(t, errors.ToAggregate().Error(), "spec.sync.target: Required value") }, }, - { - name: "sync interval too low", - repository: func() *MockRepository { - m := NewMockRepository(t) - m.On("Config").Return(&provisioning.Repository{ - Spec: provisioning.RepositorySpec{ - Title: "Test Repo", - Sync: provisioning.SyncOptions{ - Enabled: true, - Target: "test", - IntervalSeconds: 5, - }, - }, - }) - m.On("Validate").Return(field.ErrorList{}) - return m - }(), - expectedErrs: 1, - validateError: func(t *testing.T, errors field.ErrorList) { - require.Contains(t, errors.ToAggregate().Error(), "spec.sync.intervalSeconds: Invalid value") - }, - }, { name: "reserved name", repository: func() *MockRepository { @@ -191,11 
+169,10 @@ func TestValidateRepository(t *testing.T) { m.On("Validate").Return(field.ErrorList{}) return m }(), - expectedErrs: 4, // Updated from 3 to 4 to match actual errors: + expectedErrs: 3, // 1. missing title // 2. sync target missing - // 3. sync interval too low - // 4. reserved name + // 3. reserved name }, { name: "branch workflow for non-github repository", @@ -447,18 +424,6 @@ func TestFromFieldError(t *testing.T) { expectedType: metav1.CauseTypeFieldValueRequired, expectedDetail: "a repository title must be given", }, - { - name: "invalid field error", - fieldError: &field.Error{ - Type: field.ErrorTypeInvalid, - Field: "spec.sync.intervalSeconds", - Detail: "Interval must be at least 10 seconds", - }, - expectedCode: http.StatusBadRequest, - expectedField: "spec.sync.intervalSeconds", - expectedType: metav1.CauseTypeFieldValueInvalid, - expectedDetail: "Interval must be at least 10 seconds", - }, { name: "not supported field error", fieldError: &field.Error{ diff --git a/apps/secret/go.mod b/apps/secret/go.mod index c6d3459746f..e6510b74019 100644 --- a/apps/secret/go.mod +++ b/apps/secret/go.mod @@ -7,7 +7,7 @@ require ( github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250710134100-1f3dc0533caf github.com/stretchr/testify v1.11.1 google.golang.org/grpc v1.75.1 - google.golang.org/protobuf v1.36.8 + google.golang.org/protobuf v1.36.9 gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.34.1 k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b diff --git a/apps/secret/go.sum b/apps/secret/go.sum index 45f9c909509..1a9beecb06c 100644 --- a/apps/secret/go.sum +++ b/apps/secret/go.sum @@ -170,8 +170,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/shorturl/go.mod b/apps/shorturl/go.mod index e812815f1de..3fba5d3439e 100644 --- a/apps/shorturl/go.mod +++ b/apps/shorturl/go.mod @@ -33,8 +33,8 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -84,7 +84,7 @@ require ( 
google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/shorturl/go.sum b/apps/shorturl/go.sum index af2f834077e..9f0fe4068f8 100644 --- a/apps/shorturl/go.sum +++ b/apps/shorturl/go.sum @@ -48,10 +48,10 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.45.0 h1:niFqYovxuw9vnUB9qoxEgmupqriG7Gns9ZGwB2uuOyE= @@ -217,8 +217,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/conf/defaults.ini b/conf/defaults.ini index bc599285555..1c47f30e6f3 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -2229,3 +2229,8 @@ allowed_targets = instance|folder # Whether image rendering is allowed for dashboard previews. 
# Requires image rendering service to be configured. allow_image_rendering = true + +# The minimum sync interval that can be set for a repository. This is how often the controller +# will check if there have been any changes to the repository not propagated by a webhook. +# The minimum value is 10 seconds. +min_sync_interval = 10s diff --git a/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md b/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md index ea79a3686c6..dbe77c05013 100644 --- a/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md +++ b/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md @@ -10,7 +10,6 @@ labels: - cloud - enterprise - oss -menuTitle: Modify dashboard settings title: Modify dashboard settings description: Manage and edit your dashboard settings weight: 8 @@ -56,7 +55,7 @@ To access the dashboard setting page: Adjust dashboard time settings when you want to change the dashboard timezone, the local browser time, and specify auto-refresh time intervals. -1. On the **Settings** page, scroll down to the **Time Options** section of the **General** tab. +1. On the **General** tab of the **Settings** page, scroll down to the **Time options** section. 1. Specify time settings as follows. - **Time zone:** Specify the local time zone of the service or system that you are monitoring. This can be helpful when monitoring a system or service that operates across several time zones. - **Default:** Grafana uses the default selected time zone for the user profile, team, or organization. If no time zone is specified for the user profile, a team the user is a member of, or the organization, then Grafana uses the local browser time. @@ -71,6 +70,21 @@ Adjust dashboard time settings when you want to change the dashboard timezone, t 1. Click **Save**. 1. Click **Exit edit**. +## Modify graph tooltip behavior + +Use this option to control tooltip and hover highlight behavior across graph panels (for example, time series). + +1. On the **General** tab of the **Settings** page, scroll down to the **Panel options** section. +1. Choose from the following options to control the tooltip and hover highlight behavior across graph panels: + - **Default** - Tooltip and hover highlight behavior isn't shared across panels. + - **Shared crosshair** - When you hover the cursor over one graph panel in the dashboard, the crosshair is also displayed on all other graph panels in the dashboard. + - **Shared tooltip** - When you hover the cursor over one graph panel in the dashboard, the crosshair and tooltips are also displayed on all other graph panels in the dashboard. + +1. Click **Save dashboard**. +1. (Optional) Enter a description of the changes you've made. +1. Click **Save**. +1. Click **Exit edit**. + ## Add tags You can add metadata to your dashboards using tags. Tags also give you the ability to filter the list of dashboards. @@ -79,7 +93,7 @@ Tags can be up to 50 characters long, including spaces. To add tags to a dashboard, follow these steps: -1. On the **Settings** page, scroll down to the **Tags** section of the **General** tab. +1. On the **General** tab of the **Settings** page, scroll down to the **Tags** section. 1. In the field, enter a new or existing tag. If you're entering an existing tag, make sure that you spell it the same way or a new tag is created.
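Note on the new `min_sync_interval` option added to `conf/defaults.ini` above, which this patch introduces while dropping the hard-coded 10-second check from `ValidateRepository` in `apps/provisioning/pkg/repository/test.go`: the sketch below illustrates how such a duration setting could be parsed and enforced. It is a minimal, assumption-laden sketch, not Grafana's actual implementation; the function names are hypothetical, and the real code presumably reads the value through Grafana's usual ini settings loading.

```go
package main

import (
	"fmt"
	"time"
)

// minSyncInterval is the documented hard floor ("The minimum value is 10 seconds").
const minSyncInterval = 10 * time.Second

// parseMinSyncInterval parses a value such as "10s" (the defaults.ini format) and
// clamps it so the effective minimum can never drop below the hard floor.
// Hypothetical helper, for illustration only.
func parseMinSyncInterval(raw string) (time.Duration, error) {
	d, err := time.ParseDuration(raw)
	if err != nil {
		return 0, fmt.Errorf("invalid min_sync_interval %q: %w", raw, err)
	}
	if d < minSyncInterval {
		d = minSyncInterval
	}
	return d, nil
}

// validateSyncInterval is a stand-in for the per-repository check that previously
// hard-coded the 10-second minimum in ValidateRepository.
func validateSyncInterval(intervalSeconds int64, min time.Duration) error {
	if time.Duration(intervalSeconds)*time.Second < min {
		return fmt.Errorf("sync interval must be at least %s", min)
	}
	return nil
}

func main() {
	min, err := parseMinSyncInterval("10s")
	if err != nil {
		panic(err)
	}
	fmt.Println(validateSyncInterval(5, min))  // error: below the configured minimum
	fmt.Println(validateSyncInterval(60, min)) // <nil>
}
```

Clamping rather than rejecting a too-small configured value keeps a misconfigured instance running at the documented floor; the actual implementation may instead reject invalid values at startup.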
diff --git a/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md b/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md index bb8de19264f..fda106213f9 100644 --- a/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md +++ b/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md @@ -452,24 +452,28 @@ This transformation is very useful if your data source does not natively filter The available conditions for all fields are: -- **Regex** - Match a regex expression. - **Is Null** - Match if the value is null. - **Is Not Null** - Match if the value is not null. - **Equal** - Match if the value is equal to the specified value. -- **Different** - Match if the value is different than the specified value. +- **Not Equal** - Match if the value is not equal to the specified value. +- **Regex** - Match a regex expression. The available conditions for string fields are: - **Contains substring** - Match if the value contains the specified substring (case insensitive). - **Does not contain substring** - Match if the value doesn't contain the specified substring (case insensitive). -The available conditions for number and time fields are: +The available conditions for number fields are: - **Greater** - Match if the value is greater than the specified value. - **Lower** - Match if the value is lower than the specified value. - **Greater or equal** - Match if the value is greater or equal. - **Lower or equal** - Match if the value is lower or equal. -- **Range** - Match a range between a specified minimum and maximum, min and max included. A time field will pre-populate with variables to filter by selected time. +- **In between** - Match a range between a specified minimum and maximum, min and max included. + +The available conditions for time fields are: + +- **In between** - Match a range between a specified minimum and maximum. The min and max values will pre-populate with variables to filter by selected time. 
Consider the following dataset: diff --git a/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md b/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md index 6c30109a271..0ebe1aee927 100644 --- a/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md +++ b/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md @@ -6,7 +6,6 @@ description: Learn how to integrate Grafana with Hashicorp Vault so that you can labels: products: - enterprise - - oss title: Integrate Grafana with Hashicorp Vault weight: 500 --- diff --git a/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md b/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md index bd8473be5a1..23a389d582e 100644 --- a/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md +++ b/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md @@ -223,7 +223,7 @@ Team provisioning requires `group_sync_enabled = true` in the SCIM configuration {{< /admonition >}} {{< admonition type="warning" >}} -Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider. +Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider. Optionally, you can disable SCIM group sync to allow manual deletion of teams. {{< /admonition >}} For detailed configuration steps specific to the identity provider, see: diff --git a/docs/sources/upgrade-guide/when-to-upgrade/index.md b/docs/sources/upgrade-guide/when-to-upgrade/index.md index 1ffaf22fd31..e7a29e531e0 100644 --- a/docs/sources/upgrade-guide/when-to-upgrade/index.md +++ b/docs/sources/upgrade-guide/when-to-upgrade/index.md @@ -46,23 +46,19 @@ We provide release documentation in multiple places to address different needs: ## When to expect releases -Grafana currently follows a monthly release schedule. Below are the planned releases for 2025, though these dates may be subject to change: +Grafana currently follows a monthly release schedule. Below are the planned releases for the end of 2025 and the first part of 2026. However, these dates may be subject to change: | **Release date** | **Grafana versions** | **Release type** | | ---------------- | ------------------------- | ---------------- | -| Jan. 28, 2025 | 11.5 & Supported versions | Minor & patching | -| Feb. 18, 2025 | Supported versions | Patching | -| March 25, 2025 | 11.6 & Supported versions | Minor & patching | -| April 23, 2025 | Supported versions | Patching | -| May 5, 2025 | Grafana 12.0 | Major only | -| May 20, 2025 | Supported versions | Patching | -| June 17, 2025 | Supported versions | Patching | -| July 22, 2025 | 12.1 & Supported versions | Minor & patching | | Aug. 12, 2025 | Supported versions | Patching | | Sept. 23, 2025 | 12.2 & Supported versions | Minor & patching | | Oct. 21, 2025 | Supported versions | Patching | | Nov. 18, 2025 | 12.3 & Supported versions | Minor & patching | | Dec. 16, 2025 | Supported versions | Patching | +| Jan. 13, 2026 | Supported versions | Patching | +| Feb. 
24, 2026 | 12.4 & Supported versions | Minor & patching | +| Mar. 24, 2026 | Supported versions | Patching | +| TBD | Grafana 13 | Major | ### A few important notes @@ -104,20 +100,16 @@ Here is an overview of version support through 2026: | **Version** | **Release date** | **Support end date** | **Support level** | | ------------------------- | ------------------ | -------------------- | ------------------ | -| 10.2.x | October 24, 2023 | July 24, 2024 | Not Supported | -| 10.3.x | January 23, 2024 | October 23, 2024 | Not Supported | -| 10.4.x (Last minor of 10) | March 5, 2024 | June 5, 2025 | Not Supported | -| 11.0.x | May 14, 2024 | February 14, 2025 | Not Supported | -| 11.1.x | June 25, 2024 | April 23, 2025 | Not Supported | -| 11.2.x | August 27, 2024 | May 27, 2025 | Not Supported | | 11.3.x | October 22, 2024 | July 22, 2025 | Not Supported | -| 11.4.x | December 5, 2024 | September 5, 2025 | Patch Support | -| 11.5.x | January 28, 2025 | October 28, 2025 | Patch Support | +| 11.4.x | December 5, 2024 | September 5, 2025 | Not Supported | +| 11.5.x | January 28, 2025 | October 28, 2025 | Not Supported | | 11.6.x (Last minor of 11) | March 25, 2025 | June 25, 2026 | Patch Support | | 12.0.x | May 5, 2025 | February 5, 2026 | Patch Support | | 12.1.x | July 22, 2025 | April 22, 2026 | Patch Support | -| 12.2.x | September 23, 2025 | June 23, 2026 | Yet to be released | +| 12.2.x | September 23, 2025 | June 23, 2026 | Patch Support | | 12.3.x | November 18, 2025 | August 18, 2026 | Yet to be released | +| 12.4.x (Last minor of 12) | February 24, 2026 | November 24, 2026 | Yet to be released | +| 13.0.0 | TBD | TBD | Yet to be released | ## How are these versions supported? diff --git a/e2e-playwright/fixtures/long-trace-response.json b/e2e-playwright/fixtures/long-trace-response.json index 5605ab1dd7c..ee23f36e69a 100644 --- a/e2e-playwright/fixtures/long-trace-response.json +++ b/e2e-playwright/fixtures/long-trace-response.json @@ -598,7 +598,7 @@ "auth-validator", "config-loader", "config-writer", - "metrics-collector", + "metrics-collector-last-span", "log-writer", "log-reader", "event-publisher", diff --git a/e2e-playwright/various-suite/trace-view-scrolling.spec.ts b/e2e-playwright/various-suite/trace-view-scrolling.spec.ts index 4cc44f6a86f..bb32d3922ee 100644 --- a/e2e-playwright/various-suite/trace-view-scrolling.spec.ts +++ b/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @@ -2,11 +2,6 @@ import { test, expect } from '@grafana/plugin-e2e'; import longTraceResponse from '../fixtures/long-trace-response.json'; -// this test requires a larger viewport -test.use({ - viewport: { width: 1280, height: 1080 }, -}); - test.describe( 'Trace view', { @@ -33,7 +28,7 @@ test.describe( await datasourceList.getByText('gdev-jaeger').click(); // Check that gdev-jaeger is visible in the query editor - await expect(page.getByText('gdev-jaeger')).toBeVisible(); + await expect(page.getByTestId('query-editor-row').getByText('(gdev-jaeger)')).toBeVisible(); // Type the query const queryField = page @@ -44,14 +39,22 @@ test.describe( // Use Shift+Enter to execute the query await queryField.press('Shift+Enter'); - // Get the initial count of span bars - const initialSpanBars = page.getByTestId(selectors.components.TraceViewer.spanBar); - const initialSpanBarCount = await initialSpanBars.count(); + // Wait for the trace viewer to be ready + await expect(page.getByRole('switch', { name: /api\-gateway GET/ })).toBeVisible(); - await initialSpanBars.last().scrollIntoViewIfNeeded(); - await 
expect - .poll(async () => await page.getByTestId(selectors.components.TraceViewer.spanBar).count()) - .toBeGreaterThan(initialSpanBarCount); + // Note the scrolling element is actually the first child of the scroll view, but we can use the scroll wheel on this anyway + const scrollEl = page.getByTestId(selectors.pages.Explore.General.scrollView); + + // Assert that the last span is not visible in th page - it should be lazily rendered as the user scrolls + const lastSpan = page.getByRole('switch', { name: /metrics\-collector\-last\-span GET/ }); + await expect(lastSpan).not.toBeVisible(); + + // Scroll until the "metrics-collector-last-span GET" switch is visible + await expect(async () => { + await scrollEl.hover(); + await page.mouse.wheel(0, 1000); + await expect(lastSpan).toBeVisible({ timeout: 1 }); + }).toPass(); }); } ); diff --git a/go.mod b/go.mod index 5ea667a79cf..795d76c28b2 100644 --- a/go.mod +++ b/go.mod @@ -87,8 +87,8 @@ require ( github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d // @grafana/alerting-backend - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // @grafana/identity-access-team - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // @grafana/identity-access-team + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics github.com/grafana/dataplane/sdata v0.0.9 // @grafana/observability-metrics github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // @grafana/grafana-backend-group @@ -211,7 +211,7 @@ require ( gonum.org/v1/gonum v0.16.0 // @grafana/oss-big-tent google.golang.org/api v0.235.0 // @grafana/grafana-backend-group google.golang.org/grpc v1.75.1 // @grafana/plugins-platform-backend - google.golang.org/protobuf v1.36.8 // @grafana/plugins-platform-backend + google.golang.org/protobuf v1.36.9 // @grafana/plugins-platform-backend gopkg.in/ini.v1 v1.67.0 // @grafana/alerting-backend gopkg.in/mail.v2 v2.3.1 // @grafana/grafana-backend-group gopkg.in/yaml.v2 v2.4.0 // @grafana/alerting-backend diff --git a/go.sum b/go.sum index 3a861217971..5b9714044a1 100644 --- a/go.sum +++ b/go.sum @@ -1587,10 +1587,10 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d h1:zzEty7HgfXbQ/RiBCJFMqaZiJlqiXuz/Zbc6/H6ksuM= github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d/go.mod h1:T5sitas9VhVj8/S9LeRLy6H75kTBdh/sCCqHo7gaQI8= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= 
+github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA= github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg= github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= @@ -3534,8 +3534,8 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/go.work.sum b/go.work.sum index e1eb4ee0513..8a3cc4f67d2 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1046,10 +1046,12 @@ github.com/grafana/alerting v0.0.0-20250925193206-bd061d3d9185 h1:R494uXJOz7glN7 github.com/grafana/alerting v0.0.0-20250925193206-bd061d3d9185/go.mod h1:T5sitas9VhVj8/S9LeRLy6H75kTBdh/sCCqHo7gaQI8= github.com/grafana/authlib v0.0.0-20250123104008-e99947858901/go.mod h1:/gYfphsNu9v1qYWXxpv1NSvMEMSwvdf8qb8YlgwIRl8= github.com/grafana/authlib v0.0.0-20250909101823-1b466dbd19a1/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= +github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= github.com/grafana/authlib/types v0.0.0-20250120144156-d6737a7dc8f5/go.mod h1:qYjSd1tmJiuVoSICp7Py9/zD54O9uQQA3wuM6Gg4DFM= github.com/grafana/authlib/types v0.0.0-20250120145936-5f0e28e7a87c/go.mod h1:qYjSd1tmJiuVoSICp7Py9/zD54O9uQQA3wuM6Gg4DFM= github.com/grafana/authlib/types v0.0.0-20250314102521-a77865c746c0/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/authlib/types v0.0.0-20250721184729-1593a38e4933/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= github.com/grafana/cog v0.0.37/go.mod h1:UDstzYqMdgIROmbfkHL8fB9XWQO2lnf5z+4W/eJo4Dc= @@ -2163,6 +2165,7 @@ google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojt google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= diff --git a/packages/grafana-data/src/types/featureToggles.gen.ts b/packages/grafana-data/src/types/featureToggles.gen.ts index 5fd9f7960c5..a47d3db8f8e 100644 --- a/packages/grafana-data/src/types/featureToggles.gen.ts +++ b/packages/grafana-data/src/types/featureToggles.gen.ts @@ -885,6 +885,11 @@ export interface FeatureToggles { */ alertingJiraIntegration?: boolean; /** + * + * @default true + */ + alertingUseNewSimplifiedRoutingHashAlgorithm?: boolean; + /** * Use the scopes navigation endpoint instead of the dashboardbindings endpoint */ useScopesNavigationEndpoint?: boolean; diff --git a/pkg/aggregator/go.mod b/pkg/aggregator/go.mod index 94417c4784d..f7a3cdc5c93 100644 --- a/pkg/aggregator/go.mod +++ b/pkg/aggregator/go.mod @@ -153,7 +153,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/pkg/aggregator/go.sum b/pkg/aggregator/go.sum index 74a776206b2..f294a97f777 100644 --- a/pkg/aggregator/go.sum +++ b/pkg/aggregator/go.sum @@ -445,8 +445,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go. 
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/api/admin_users.go b/pkg/api/admin_users.go index 64453f86688..84f4b7b4ae3 100644 --- a/pkg/api/admin_users.go +++ b/pkg/api/admin_users.go @@ -17,6 +17,7 @@ import ( contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/login" "github.com/grafana/grafana/pkg/services/org" + pref "github.com/grafana/grafana/pkg/services/preference" "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/web" ) @@ -222,7 +223,7 @@ func (hs *HTTPServer) AdminDeleteUser(c *contextmodel.ReqContext) response.Respo return nil }) g.Go(func() error { - if err := hs.preferenceService.DeleteByUser(ctx, cmd.UserID); err != nil { + if err := hs.preferenceService.Delete(ctx, &pref.DeleteCommand{UserID: cmd.UserID}); err != nil { return err } return nil diff --git a/pkg/api/plugins_test.go b/pkg/api/plugins_test.go index 4409395ebe2..cf8a2eca399 100644 --- a/pkg/api/plugins_test.go +++ b/pkg/api/plugins_test.go @@ -12,19 +12,18 @@ import ( "strings" "testing" + "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/grafana/grafana-plugin-sdk-go/backend" - "github.com/grafana/grafana/pkg/plugins/auth" - "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/log/logtest" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/plugins/auth" "github.com/grafana/grafana/pkg/plugins/config" "github.com/grafana/grafana/pkg/plugins/manager/fakes" "github.com/grafana/grafana/pkg/plugins/manager/filestore" @@ -528,9 +527,12 @@ func callGetPluginAsset(sc *scenarioContext) { func pluginAssetScenario(t *testing.T, desc string, url string, urlPattern string, cfg *setting.Cfg, pluginRegistry registry.Service, fn scenarioFunc) { t.Run(fmt.Sprintf("%s %s", desc, url), func(t *testing.T) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs := HTTPServer{ Cfg: cfg, - pluginStore: pluginstore.New(pluginRegistry, &fakes.FakeLoader{}), + pluginStore: store, pluginFileStore: filestore.ProvideService(pluginRegistry), log: log.NewNopLogger(), pluginsCDNService: pluginscdn.ProvideService(&config.PluginManagementCfg{ @@ -640,12 +642,14 @@ func Test_PluginsList_AccessControl(t *testing.T) { for _, tc := range tcs { t.Run(tc.desc, func(t 
*testing.T) { server := SetupAPITestServer(t, func(hs *HTTPServer) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs.Cfg = setting.NewCfg() hs.PluginSettings = &pluginSettings - hs.pluginStore = pluginstore.New(pluginRegistry, &fakes.FakeLoader{}) + hs.pluginStore = store hs.pluginFileStore = filestore.ProvideService(pluginRegistry) hs.managedPluginsService = managedplugins.NewNoop() - var err error hs.pluginsUpdateChecker, err = updatemanager.ProvidePluginsService( hs.Cfg, hs.pluginStore, @@ -828,9 +832,12 @@ func Test_PluginsSettings(t *testing.T) { for _, tc := range tcs { t.Run(tc.desc, func(t *testing.T) { server := SetupAPITestServer(t, func(hs *HTTPServer) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs.Cfg = setting.NewCfg() hs.PluginSettings = &pluginSettings - hs.pluginStore = pluginstore.New(pluginRegistry, &fakes.FakeLoader{}) + hs.pluginStore = store hs.pluginFileStore = filestore.ProvideService(pluginRegistry) errTracker := pluginerrs.ProvideErrorTracker() if tc.errCode != "" { @@ -844,7 +851,6 @@ func Test_PluginsSettings(t *testing.T) { sig := signature.ProvideService(pCfg, statickey.New()) hs.pluginAssets = pluginassets.ProvideService(pCfg, pluginCDN, sig, hs.pluginStore) hs.pluginErrorResolver = pluginerrs.ProvideStore(errTracker) - var err error hs.pluginsUpdateChecker, err = updatemanager.ProvidePluginsService( hs.Cfg, hs.pluginStore, @@ -896,9 +902,12 @@ func Test_UpdatePluginSetting(t *testing.T) { t.Run("should return an error when trying to disable an auto-enabled plugin", func(t *testing.T) { server := SetupAPITestServer(t, func(hs *HTTPServer) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs.Cfg = setting.NewCfg() hs.PluginSettings = &pluginSettings - hs.pluginStore = pluginstore.New(pluginRegistry, &fakes.FakeLoader{}) + hs.pluginStore = store hs.pluginFileStore = filestore.ProvideService(pluginRegistry) hs.managedPluginsService = managedplugins.NewNoop() hs.log = log.NewNopLogger() diff --git a/pkg/apimachinery/go.mod b/pkg/apimachinery/go.mod index 58ffee8df9c..e084fdf1fad 100644 --- a/pkg/apimachinery/go.mod +++ b/pkg/apimachinery/go.mod @@ -3,8 +3,8 @@ module github.com/grafana/grafana/pkg/apimachinery go 1.24.6 require ( - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // @grafana/identity-access-team - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // @grafana/identity-access-team + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team github.com/stretchr/testify v1.11.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.34.1 @@ -51,7 +51,7 @@ require ( golang.org/x/text v0.29.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/pkg/apimachinery/go.sum b/pkg/apimachinery/go.sum index 990dfc62413..2cd16740dd0 100644 --- a/pkg/apimachinery/go.sum +++ 
b/pkg/apimachinery/go.sum @@ -30,10 +30,10 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -132,8 +132,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/apimachinery/validation/validation.go b/pkg/apimachinery/validation/validation.go index 4778db398f3..777eb59fa46 100644 --- a/pkg/apimachinery/validation/validation.go +++ b/pkg/apimachinery/validation/validation.go @@ -23,9 +23,9 @@ const qualifiedNameFmt string = "^(" + qnameCharFmt + qnameExtCharFmt + "*)?" 
+ const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" const alphaCharFmt string = "[A-Za-z]" -const resourceCharFmt string = "[A-Za-z0-9]" // alpha numeric +const resourceCharFmt string = "[A-Za-z0-9-]" // alpha numeric plus dashes const resourceFmt string = "^" + alphaCharFmt + resourceCharFmt + "*$" -const resourceErrMsg string = "must consist of alphanumeric characters" +const resourceErrMsg string = "must consist of alphanumeric characters and dashes, and must start with an alphabetic character" var ( grafanaNameRegexp = regexp.MustCompile(grafanaNameFmt).MatchString diff --git a/pkg/apimachinery/validation/validation_test.go b/pkg/apimachinery/validation/validation_test.go index 2e699097cdd..3bb5fa61ad9 100644 --- a/pkg/apimachinery/validation/validation_test.go +++ b/pkg/apimachinery/validation/validation_test.go @@ -198,16 +198,17 @@ func TestValidation(t *testing.T) { "folders", "folders123", "aaa", + "hello-world", + "hello-world-", }, }, { name: "bad input", expect: []string{ - "resource must consist of alphanumeric characters (e.g. 'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9]*$')", + "resource must consist of alphanumeric characters and dashes, and must start with an alphabetic character (e.g. 'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9-]*$')", }, input: []string{ "_bad_input", "hello world", - "hello-world", "hello!", "hello~", "hello ", diff --git a/pkg/apiserver/go.mod b/pkg/apiserver/go.mod index 65144ec803f..c039fb3b21c 100644 --- a/pkg/apiserver/go.mod +++ b/pkg/apiserver/go.mod @@ -4,7 +4,7 @@ go 1.24.6 require ( github.com/google/go-cmp v0.7.0 - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250514132646-acbc7b54ed9e github.com/prometheus/client_golang v1.23.2 @@ -44,7 +44,7 @@ require ( github.com/google/gnostic-models v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect @@ -95,7 +95,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/apiserver/go.sum b/pkg/apiserver/go.sum index 8e730d8e153..a28bdfada90 100644 --- a/pkg/apiserver/go.sum +++ b/pkg/apiserver/go.sum @@ -63,10 +63,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= 
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk/logging v0.45.0 h1:0SH6nYZpiLBZRwUq4J6+1vo8xuHKJjnO95/2pGOoA8w= @@ -265,8 +265,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go. google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/build/go.mod b/pkg/build/go.mod index 47b17e88e05..66435b14493 100644 --- a/pkg/build/go.mod +++ b/pkg/build/go.mod @@ -17,7 +17,7 @@ require ( golang.org/x/sync v0.17.0 // @grafana/alerting-backend golang.org/x/text v0.29.0 // indirect; @grafana/grafana-backend-group google.golang.org/grpc v1.75.1 // indirect; @grafana/plugins-platform-backend - google.golang.org/protobuf v1.36.8 // indirect; @grafana/plugins-platform-backend + google.golang.org/protobuf v1.36.9 // indirect; @grafana/plugins-platform-backend ) require ( diff --git a/pkg/build/go.sum b/pkg/build/go.sum index d27999d2748..646c193b2c7 100644 --- a/pkg/build/go.sum +++ b/pkg/build/go.sum @@ -111,8 +111,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 
h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/expr/convert_to_full_long.go b/pkg/expr/convert_to_full_long.go index 6c17badb1d8..dd626ca6421 100644 --- a/pkg/expr/convert_to_full_long.go +++ b/pkg/expr/convert_to_full_long.go @@ -14,8 +14,8 @@ const ( SQLDisplayFieldName = "__display_name__" // These are not yet types in the SDK or the dataplane contract. - numericFullLongType = "numeric_full_long" - timeseriesFullLongType = "time_series_full_long" + numericFullLongType = "numeric-full-long" + timeseriesFullLongType = "timeseries-full-long" ) func ConvertToFullLong(frames data.Frames) (data.Frames, error) { diff --git a/pkg/promlib/go.mod b/pkg/promlib/go.mod index a7d1382aa29..dc5c76c2f29 100644 --- a/pkg/promlib/go.mod +++ b/pkg/promlib/go.mod @@ -12,7 +12,7 @@ require ( github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 - google.golang.org/protobuf v1.36.8 + google.golang.org/protobuf v1.36.9 k8s.io/apimachinery v0.34.1 ) diff --git a/pkg/promlib/go.sum b/pkg/promlib/go.sum index 1f25f1f8201..2b4eab7619c 100644 --- a/pkg/promlib/go.sum +++ b/pkg/promlib/go.sum @@ -420,8 +420,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/promlib/models/query.go b/pkg/promlib/models/query.go index ae56d929ec2..ec32b7bc3cd 100644 --- a/pkg/promlib/models/query.go +++ b/pkg/promlib/models/query.go @@ -1,6 +1,7 @@ package models import ( + "context" "embed" "encoding/json" "fmt" @@ -15,6 +16,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + glog "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana/pkg/promlib/intervalv2" ) @@ -190,7 +192,7 @@ type internalQueryModel struct { Interval string `json:"interval,omitempty"` } -func Parse(span trace.Span, query backend.DataQuery, dsScrapeInterval string, intervalCalculator intervalv2.Calculator, fromAlert bool, enableScope bool) (*Query, error) { +func Parse(ctx context.Context,
log glog.Logger, span trace.Span, query backend.DataQuery, dsScrapeInterval string, intervalCalculator intervalv2.Calculator, fromAlert bool, enableScope bool) (*Query, error) { model := &internalQueryModel{} if err := json.Unmarshal(query.JSON, model); err != nil { return nil, err @@ -241,6 +243,7 @@ func Parse(span trace.Span, query backend.DataQuery, dsScrapeInterval string, in } if len(scopeFilters) > 0 || len(model.AdhocFilters) > 0 || len(model.GroupByKeys) > 0 { + log.Info("Applying scope filters", "scopeFiltersCount", len(scopeFilters), "adhocFiltersCount", len(model.AdhocFilters), "groupByKeysCount", len(model.GroupByKeys)) expr, err = ApplyFiltersAndGroupBy(expr, scopeFilters, model.AdhocFilters, model.GroupByKeys) if err != nil { return nil, err diff --git a/pkg/promlib/models/query_test.go b/pkg/promlib/models/query_test.go index 8826e0c37c5..cc2b2637442 100644 --- a/pkg/promlib/models/query_test.go +++ b/pkg/promlib/models/query_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" + "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana/pkg/promlib/intervalv2" "github.com/grafana/grafana/pkg/promlib/models" ) @@ -44,7 +45,7 @@ func TestParse(t *testing.T) { RefID: "A", } - res, err := models.Parse(span, q, "15s", intervalCalculator, true, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, true, false) require.NoError(t, err) require.Equal(t, false, res.ExemplarQuery) }) @@ -61,7 +62,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Second*30, res.Step) }) @@ -79,7 +80,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Second*15, res.Step) }) @@ -97,7 +98,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Minute*20, res.Step) }) @@ -115,7 +116,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Minute*2, res.Step) }) @@ -133,7 +134,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "240s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "240s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Minute*4, res.Step) }) @@ -152,7 +153,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err 
:= models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [2m]})", res.Expr) require.Equal(t, 120*time.Second, res.Step) @@ -173,7 +174,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [2m]})", res.Expr) }) @@ -192,7 +193,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [120000]})", res.Expr) }) @@ -211,7 +212,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [120000]}) + rate(ALERTS{job=\"test\" [2m]})", res.Expr) }) @@ -230,7 +231,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [120000]}) + rate(ALERTS{job=\"test\" [2m]})", res.Expr) }) @@ -248,7 +249,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800s]})", res.Expr) }) @@ -266,7 +267,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800]})", res.Expr) }) @@ -284,7 +285,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800s]})", res.Expr) }) @@ -302,7 +303,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [0]})", res.Expr) }) @@ -320,7 +321,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", 
intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [1]})", res.Expr) }) @@ -338,7 +339,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800000]})", res.Expr) }) @@ -356,7 +357,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [20]})", res.Expr) }) @@ -375,7 +376,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [20m0s]})", res.Expr) }) @@ -394,7 +395,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 1*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [1m0s]})", res.Expr) require.Equal(t, 1*time.Minute, res.Step) @@ -413,7 +414,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [135000]})", res.Expr) }) @@ -431,7 +432,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [135000]}) + rate(ALERTS{job=\"test\" [2m15s]})", res.Expr) }) @@ -450,7 +451,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "A", res.RefId) }) @@ -468,7 +469,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [135000]}) + rate(ALERTS{job=\"test\" [2m15s]})", res.Expr) }) @@ -487,7 +488,7 @@ func TestParse(t *testing.T) { "range": true }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := 
models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, true, res.RangeQuery) }) @@ -507,7 +508,7 @@ func TestParse(t *testing.T) { "instant": true }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, true, res.RangeQuery) require.Equal(t, true, res.InstantQuery) @@ -526,7 +527,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, true, res.RangeQuery) }) @@ -659,7 +660,7 @@ func TestRateInterval(t *testing.T) { t.Run(tt.name, func(t *testing.T) { q := mockQuery(tt.args.expr, tt.args.interval, tt.args.intervalMs, tt.args.timeRange) q.MaxDataPoints = 12384 - res, err := models.Parse(span, q, tt.args.dsScrapeInterval, intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, tt.args.dsScrapeInterval, intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, tt.want.Expr, res.Expr) require.Equal(t, tt.want.Step, res.Step) @@ -694,7 +695,7 @@ func TestRateInterval(t *testing.T) { "utcOffsetSec":3600 }`), } - res, err := models.Parse(span, query, "30s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, query, "30s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "sum(rate(process_cpu_seconds_total[2m0s]))", res.Expr) require.Equal(t, 30*time.Second, res.Step) @@ -729,7 +730,7 @@ func TestRateInterval(t *testing.T) { "maxDataPoints": 1055 }`), } - res, err := models.Parse(span, query, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, query, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "sum(rate(cache_requests_total[1m0s]))", res.Expr) require.Equal(t, 15*time.Second, res.Step) diff --git a/pkg/promlib/querydata/request.go b/pkg/promlib/querydata/request.go index 5f3f9556648..ff3cefc4458 100644 --- a/pkg/promlib/querydata/request.go +++ b/pkg/promlib/querydata/request.go @@ -129,7 +129,7 @@ func (s *QueryData) handleQuery(ctx context.Context, bq backend.DataQuery, fromA hasPromQLScopeFeatureFlag bool) *backend.DataResponse { traceCtx, span := s.tracer.Start(ctx, "datasource.prometheus") defer span.End() - query, err := models.Parse(span, bq, s.TimeInterval, s.intervalCalculator, fromAlert, hasPromQLScopeFeatureFlag) + query, err := models.Parse(ctx, s.log, span, bq, s.TimeInterval, s.intervalCalculator, fromAlert, hasPromQLScopeFeatureFlag) if err != nil { return &backend.DataResponse{ Error: err, @@ -145,7 +145,7 @@ func (s *QueryData) handleQuery(ctx context.Context, bq backend.DataQuery, fromA func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *models.Query) *backend.DataResponse { logger := s.log.FromContext(traceCtx) - logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr /*, "queryTimeout", s.QueryTimeout*/) + logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr) dr := &backend.DataResponse{ 
Frames: data.Frames{}, diff --git a/pkg/registry/apis/datasource/register.go b/pkg/registry/apis/datasource/register.go index dc31d8cbd9e..8d24e69a03e 100644 --- a/pkg/registry/apis/datasource/register.go +++ b/pkg/registry/apis/datasource/register.go @@ -3,7 +3,9 @@ package datasource import ( "context" "encoding/json" + "errors" "fmt" + "path/filepath" "github.com/prometheus/client_golang/prometheus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,14 +22,16 @@ import ( "github.com/grafana/grafana/pkg/apimachinery/utils" datasource "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1" query "github.com/grafana/grafana/pkg/apis/query/v0alpha1" + "github.com/grafana/grafana/pkg/configprovider" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/plugins/manager/sources" "github.com/grafana/grafana/pkg/promlib/models" "github.com/grafana/grafana/pkg/registry/apis/query/queryschema" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/apiserver/builder" "github.com/grafana/grafana/pkg/services/featuremgmt" - "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore" + "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/tsdb/grafana-testdata-datasource/kinds" ) @@ -47,12 +51,12 @@ type DataSourceAPIBuilder struct { } func RegisterAPIService( + cfgProvider configprovider.ConfigProvider, features featuremgmt.FeatureToggles, apiRegistrar builder.APIRegistrar, pluginClient plugins.Client, // access to everything datasources ScopedPluginDatasourceProvider, contextProvider PluginContextWrapper, - pluginStore pluginstore.Store, accessControl accesscontrol.AccessControl, reg prometheus.Registerer, ) (*DataSourceAPIBuilder, error) { @@ -66,25 +70,43 @@ func RegisterAPIService( var err error var builder *DataSourceAPIBuilder - all := pluginStore.Plugins(context.Background(), plugins.TypeDataSource) + + cfg, err := cfgProvider.Get(context.Background()) + if err != nil { + return nil, err + } + pluginJSONs, err := getCorePlugins(cfg) + if err != nil { + return nil, err + } + ids := []string{ "grafana-testdata-datasource", "prometheus", "graphite", } - for _, ds := range all { - if explictPluginList && !slices.Contains(ids, ds.ID) { + for _, pluginJSON := range pluginJSONs { + if explictPluginList && !slices.Contains(ids, pluginJSON.ID) { continue // skip this one } - if !ds.Backend { + if !pluginJSON.Backend { continue // skip frontend only plugins } - builder, err = NewDataSourceAPIBuilder(ds.JSONData, - pluginClient, - datasources.GetDatasourceProvider(ds.JSONData), + if pluginJSON.Type != plugins.TypeDataSource { + continue // skip non-datasource plugins + } + + client, ok := pluginClient.(PluginClient) + if !ok { + return nil, fmt.Errorf("plugin client is not a PluginClient: %T", pluginClient) + } + + builder, err = NewDataSourceAPIBuilder(pluginJSON, + client, + datasources.GetDatasourceProvider(pluginJSON), contextProvider, accessControl, features.IsEnabledGlobally(featuremgmt.FlagDatasourceQueryTypes), @@ -277,3 +299,22 @@ func (b *DataSourceAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.Op return oas, err } + +func getCorePlugins(cfg *setting.Cfg) ([]plugins.JSONData, error) { + coreDataSourcesPath := filepath.Join(cfg.StaticRootPath, "app", "plugins", "datasource") + coreDataSourcesSrc := sources.NewLocalSource( + plugins.ClassCore, + []string{coreDataSourcesPath}, + ) + + res, err := coreDataSourcesSrc.Discover(context.Background()) 
+ if err != nil { + return nil, errors.New("failed to load core data source plugins") + } + + pluginJSONs := make([]plugins.JSONData, 0, len(res)) + for _, p := range res { + pluginJSONs = append(pluginJSONs, p.Primary.JSONData) + } + return pluginJSONs, nil +} diff --git a/pkg/registry/apis/datasource/sub_query.go b/pkg/registry/apis/datasource/sub_query.go index 51f8b90d430..258447418bc 100644 --- a/pkg/registry/apis/datasource/sub_query.go +++ b/pkg/registry/apis/datasource/sub_query.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1" + "github.com/grafana/grafana/pkg/apimachinery/errutil" query "github.com/grafana/grafana/pkg/apis/query/v0alpha1" query_headers "github.com/grafana/grafana/pkg/registry/apis/query" "github.com/grafana/grafana/pkg/services/datasources" @@ -96,6 +97,22 @@ func (r *subQueryREST) Connect(ctx context.Context, name string, opts runtime.Ob PluginContext: pluginCtx, Headers: query_headers.ExtractKnownHeaders(req.Header), }) + + // all errors get converted into k8s errors when sent via responder.Error and lose important context like downstream info + var e errutil.Error + if errors.As(err, &e) && e.Source == errutil.SourceDownstream { + responder.Object(int(backend.StatusBadRequest), + &query.QueryDataResponse{QueryDataResponse: backend.QueryDataResponse{Responses: map[string]backend.DataResponse{ + "A": { + Error: errors.New(e.LogMessage), + ErrorSource: backend.ErrorSourceDownstream, + Status: backend.StatusBadRequest, + }, + }}}, + ) + return + } + if err != nil { responder.Error(err) return diff --git a/pkg/registry/apis/folders/sub_access.go b/pkg/registry/apis/folders/sub_access.go index ff068a14a33..314d9318444 100644 --- a/pkg/registry/apis/folders/sub_access.go +++ b/pkg/registry/apis/folders/sub_access.go @@ -87,8 +87,7 @@ func (r *subAccessREST) getAccessInfo(ctx context.Context, name string) (*folder Resource: foldersV1.RESOURCE, Namespace: ns.Value, Name: name, - Folder: obj.GetFolder(), - }) + }, obj.GetFolder()) return tmp.Allowed } diff --git a/pkg/registry/apis/preferences/legacy/preferences.go b/pkg/registry/apis/preferences/legacy/preferences.go index e9fd6b32aed..ab7eb2d4891 100644 --- a/pkg/registry/apis/preferences/legacy/preferences.go +++ b/pkg/registry/apis/preferences/legacy/preferences.go @@ -13,11 +13,13 @@ import ( requestK8s "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" + authlib "github.com/grafana/authlib/types" preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1" "github.com/grafana/grafana/pkg/apimachinery/identity" utilsOrig "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/registry/apis/preferences/utils" "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request" + pref "github.com/grafana/grafana/pkg/services/preference" ) var ( @@ -26,13 +28,14 @@ var ( _ rest.Getter = (*preferenceStorage)(nil) _ rest.Lister = (*preferenceStorage)(nil) _ rest.Storage = (*preferenceStorage)(nil) - // _ rest.Creater = (*preferenceStorage)(nil) - // _ rest.Updater = (*preferenceStorage)(nil) - // _ rest.GracefulDeleter = (*preferenceStorage)(nil) + _ rest.Creater = (*preferenceStorage)(nil) + _ rest.Updater = (*preferenceStorage)(nil) + _ rest.GracefulDeleter = (*preferenceStorage)(nil) ) -func NewPreferencesStorage(namespacer request.NamespaceMapper, sql *LegacySQL) *preferenceStorage { +func NewPreferencesStorage(pref pref.Service,
namespacer request.NamespaceMapper, sql *LegacySQL) *preferenceStorage { return &preferenceStorage{ + prefs: pref, namespacer: namespacer, sql: sql, tableConverter: preferences.PreferencesResourceInfo.TableConverter(), @@ -43,6 +46,7 @@ type preferenceStorage struct { namespacer request.NamespaceMapper tableConverter rest.TableConvertor sql *LegacySQL + prefs pref.Service } func (s *preferenceStorage) New() runtime.Object { @@ -73,7 +77,7 @@ func (s *preferenceStorage) List(ctx context.Context, options *internalversion.L return nil, err } ns := requestK8s.NamespaceValue(ctx) - if user.GetIsGrafanaAdmin() { + if user.GetIdentityType() == authlib.TypeAccessPolicy { user = nil // nill user can see everything } return s.sql.ListPreferences(ctx, ns, user, true) @@ -116,6 +120,151 @@ func (s *preferenceStorage) Get(ctx context.Context, name string, options *metav return nil, preferences.PreferencesResourceInfo.NewNotFound(name) } +func (s *preferenceStorage) save(ctx context.Context, obj runtime.Object) (runtime.Object, error) { + user, err := identity.GetRequester(ctx) + if err != nil { + return nil, err + } + + p, ok := obj.(*preferences.Preferences) + if !ok { + return nil, fmt.Errorf("expected preferences") + } + + owner, ok := utils.ParseOwnerFromName(p.Name) + if !ok { + return nil, fmt.Errorf("invalid name") + } + + cmd := &pref.SavePreferenceCommand{ + OrgID: user.GetOrgID(), + HomeDashboardUID: p.Spec.HomeDashboardUID, + } + if p.Spec.Timezone != nil { + cmd.Timezone = *p.Spec.Timezone + } + if p.Spec.WeekStart != nil { + cmd.WeekStart = *p.Spec.WeekStart + } + if p.Spec.Theme != nil { + cmd.Theme = *p.Spec.Theme + } + if p.Spec.Language != nil { + cmd.Language = *p.Spec.Language + } + if p.Spec.RegionalFormat != nil { + cmd.RegionalFormat = *p.Spec.RegionalFormat + } + if p.Spec.QueryHistory != nil { + cmd.QueryHistory = &pref.QueryHistoryPreference{ + HomeTab: *p.Spec.QueryHistory.HomeTab, + } + } + if p.Spec.Navbar != nil { + cmd.Navbar = &pref.NavbarPreference{ + BookmarkUrls: p.Spec.Navbar.BookmarkUrls, + } + } + if p.Spec.CookiePreferences != nil { + cmd.CookiePreferences = []pref.CookieType{} + if p.Spec.CookiePreferences.Analytics != nil { + cmd.CookiePreferences = append(cmd.CookiePreferences, "analytics") + } + if p.Spec.CookiePreferences.Functional != nil { + cmd.CookiePreferences = append(cmd.CookiePreferences, "functional") + } + if p.Spec.CookiePreferences.Performance != nil { + cmd.CookiePreferences = append(cmd.CookiePreferences, "performance") + } + } + + switch owner.Owner { + case utils.NamespaceResourceOwner: + // the org ID is already set + + case utils.UserResourceOwner: + if user.GetIdentifier() != owner.Identifier { + return nil, fmt.Errorf("only the user can save preferences") + } + cmd.UserID, err = user.GetInternalID() + if err != nil { + return nil, err + } + case utils.TeamResourceOwner: + cmd.TeamID, err = s.sql.getLegacyTeamID(ctx, user.GetOrgID(), owner.Identifier) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("unsupported name") + } + + if err = s.prefs.Save(ctx, cmd); err != nil { + return nil, err + } + return s.Get(ctx, owner.AsName(), &metav1.GetOptions{}) +} + +// Create implements rest.Creater. +func (s *preferenceStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + return s.save(ctx, obj) +} + +// Update implements rest.Updater. 
+func (s *preferenceStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { + old, err := s.Get(ctx, name, &metav1.GetOptions{}) + if err != nil { + return nil, false, err + } + + obj, err := objInfo.UpdatedObject(ctx, old) + if err != nil { + return nil, false, err + } + + obj, err = s.save(ctx, obj) + return obj, false, err +} + +// Delete implements rest.GracefulDeleter. +func (s *preferenceStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { + user, err := identity.GetRequester(ctx) + if err != nil { + return nil, false, err + } + + owner, ok := utils.ParseOwnerFromName(name) + if !ok { + return nil, false, fmt.Errorf("invalid name") + } + + cmd := &pref.DeleteCommand{} + + switch owner.Owner { + case utils.TeamResourceOwner: + cmd.TeamID, err = user.GetInternalID() + if err != nil { + return nil, false, err + } + + case utils.UserResourceOwner: + cmd.UserID, err = user.GetInternalID() + if err != nil { + return nil, false, err + } + + case utils.NamespaceResourceOwner: + cmd.OrgID = user.GetOrgID() + + default: + return nil, false, fmt.Errorf("unsupported owner") + } + + err = s.prefs.Delete(ctx, cmd) + return nil, (err == nil), err +} + func asPreferencesResource(ns string, p *preferenceModel) preferences.Preferences { owner := utils.OwnerReference{} if p.TeamUID.Valid { diff --git a/pkg/registry/apis/preferences/legacy/sql.go b/pkg/registry/apis/preferences/legacy/sql.go index bf3e8900f81..fb4cc60fcbf 100644 --- a/pkg/registry/apis/preferences/legacy/sql.go +++ b/pkg/registry/apis/preferences/legacy/sql.go @@ -49,7 +49,7 @@ func NewLegacySQL(db legacysql.LegacyDatabaseProvider) *LegacySQL { } // NOTE: this does not support paging -- lets check if that will be a problem in cloud -func (s *LegacySQL) GetStars(ctx context.Context, orgId int64, user string) ([]dashboardStars, int64, error) { +func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user string) ([]dashboardStars, int64, error) { var max sql.NullString sql, err := s.db(ctx) if err != nil { @@ -120,7 +120,10 @@ func (s *LegacySQL) GetStars(ctx context.Context, orgId int64, user string) ([]d return nil, 0, fmt.Errorf("unable to get RV %w", err) } if max.Valid && max.String != "" { - fmt.Printf("max RV: %s\n", max.String) + t, _ := time.Parse(time.RFC3339, max.String) + if !t.IsZero() { + updated = t + } } else { updated = s.startup } @@ -206,7 +209,10 @@ func (s *LegacySQL) listPreferences(ctx context.Context, return nil, 0, fmt.Errorf("unable to get RV %w", err) } if max.Valid && max.String != "" { - fmt.Printf("max RV: %s\n", max.String) + t, _ := time.Parse(time.RFC3339, max.String) + if !t.IsZero() { + rv.Time = t + } } else { rv.Time = s.startup } @@ -229,7 +235,7 @@ func (s *LegacySQL) ListPreferences(ctx context.Context, ns string, user identit found, rv, err := s.listPreferences(ctx, ns, info.OrgID, func(req *preferencesQuery) (bool, error) { if user != nil { - req.UserUID = user.GetRawIdentifier() + req.UserUID = user.GetIdentifier() teams, err = s.GetTeams(ctx, &identity.StaticRequester{ OrgID: info.OrgID, UserUID: req.UserUID, @@ -243,7 +249,7 @@ func (s *LegacySQL) ListPreferences(ctx context.Context, ns string, user identit return true } if p.UserUID.String != "" { - return user.GetRawIdentifier() 
== p.UserUID.String + return user.GetIdentifier() == p.UserUID.String } if p.TeamUID.String != "" { return slices.Contains(teams, p.TeamUID.String) @@ -293,3 +299,15 @@ func (s *LegacySQL) GetTeams(ctx context.Context, id authlib.AuthInfo, admin boo err = sess.Select(ctx, &teams, q, req.GetArgs()...) return teams, err } + +func (s *LegacySQL) getLegacyTeamID(ctx context.Context, orgId int64, team string) (int64, error) { + sql, err := s.db(ctx) + if err != nil { + return 0, err + } + + var id int64 + sess := sql.DB.GetSqlxSession() + err = sess.Select(ctx, &id, "SELECT id FROM team WHERE org_id=? AND uid=?", orgId, team) + return id, err +} diff --git a/pkg/registry/apis/preferences/legacy/stars.go b/pkg/registry/apis/preferences/legacy/stars.go index 87f5300799d..b048c249170 100644 --- a/pkg/registry/apis/preferences/legacy/stars.go +++ b/pkg/registry/apis/preferences/legacy/stars.go @@ -97,13 +97,13 @@ func (s *DashboardStarsStorage) List(ctx context.Context, options *internalversi return nil, err } - user := userInfo.GetUID() - if userInfo.GetIsGrafanaAdmin() || userInfo.GetIdentityType() == authlib.TypeAccessPolicy { + user := userInfo.GetIdentifier() + if userInfo.GetIdentityType() == authlib.TypeAccessPolicy { user = "" // can see everything } list := &preferences.StarsList{} - found, rv, err := s.sql.GetStars(ctx, ns.OrgID, user) + found, rv, err := s.sql.getDashboardStars(ctx, ns.OrgID, user) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func (s *DashboardStarsStorage) Get(ctx context.Context, name string, options *m return nil, err } - found, _, err := s.sql.GetStars(ctx, ns.OrgID, owner.Identifier) + found, _, err := s.sql.getDashboardStars(ctx, ns.OrgID, owner.Identifier) if err != nil { return nil, err } @@ -187,7 +187,7 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Star }}, err } - current, _, err := s.sql.GetStars(ctx, ns.OrgID, owner.Identifier) + current, _, err := s.sql.getDashboardStars(ctx, ns.OrgID, owner.Identifier) if err != nil { return nil, err } diff --git a/pkg/registry/apis/preferences/merged_preferences.go b/pkg/registry/apis/preferences/preferences_merged.go similarity index 100% rename from pkg/registry/apis/preferences/merged_preferences.go rename to pkg/registry/apis/preferences/preferences_merged.go diff --git a/pkg/registry/apis/preferences/merged_preferences_test.go b/pkg/registry/apis/preferences/preferences_merged_test.go similarity index 100% rename from pkg/registry/apis/preferences/merged_preferences_test.go rename to pkg/registry/apis/preferences/preferences_merged_test.go diff --git a/pkg/registry/apis/preferences/register.go b/pkg/registry/apis/preferences/register.go index 6fff343f233..ff186c6bdb2 100644 --- a/pkg/registry/apis/preferences/register.go +++ b/pkg/registry/apis/preferences/register.go @@ -72,7 +72,7 @@ func RegisterAPIService( namespacer := request.GetNamespaceMapper(cfg) if prefs != nil { - builder.legacyPrefs = legacy.NewPreferencesStorage(namespacer, sql) + builder.legacyPrefs = legacy.NewPreferencesStorage(prefs, namespacer, sql) } if stars != nil { builder.legacyStars = legacy.NewDashboardStarsStorage(stars, users, namespacer, sql) diff --git a/pkg/registry/apis/preferences/update_stars.go b/pkg/registry/apis/preferences/stars_update.go similarity index 100% rename from pkg/registry/apis/preferences/update_stars.go rename to pkg/registry/apis/preferences/stars_update.go diff --git a/pkg/registry/apis/preferences/update_stars_test.go b/pkg/registry/apis/preferences/stars_update_test.go 
similarity index 100% rename from pkg/registry/apis/preferences/update_stars_test.go rename to pkg/registry/apis/preferences/stars_update_test.go diff --git a/pkg/registry/apis/provisioning/controller/finalizers.go b/pkg/registry/apis/provisioning/controller/finalizers.go index 18f00c17c40..d9c41ee4c15 100644 --- a/pkg/registry/apis/provisioning/controller/finalizers.go +++ b/pkg/registry/apis/provisioning/controller/finalizers.go @@ -120,8 +120,8 @@ func (f *finalizer) processExistingItems( Group: item.Group, Resource: item.Resource, }) - logger.Error("error getting client for resource", "resource", item.Resource, "error", err) if err != nil { + logger.Error("error getting client for resource", "resource", item.Resource, "error", err) return count, err } diff --git a/pkg/registry/apis/provisioning/jobs.go b/pkg/registry/apis/provisioning/jobs.go index 4a344b60192..b1e174a04b7 100644 --- a/pkg/registry/apis/provisioning/jobs.go +++ b/pkg/registry/apis/provisioning/jobs.go @@ -20,16 +20,23 @@ type JobQueueGetter interface { } type jobsConnector struct { - repoGetter RepoGetter - jobs JobQueueGetter - historic jobs.HistoryReader + repoGetter RepoGetter + statusPatcherProvider StatusPatcherProvider + jobs JobQueueGetter + historic jobs.HistoryReader } -func NewJobsConnector(repoGetter RepoGetter, jobs JobQueueGetter, historic jobs.HistoryReader) *jobsConnector { +func NewJobsConnector( + repoGetter RepoGetter, + statusPatcherProvider StatusPatcherProvider, + jobs JobQueueGetter, + historic jobs.HistoryReader, +) *jobsConnector { return &jobsConnector{ - repoGetter: repoGetter, - jobs: jobs, - historic: historic, + repoGetter: repoGetter, + statusPatcherProvider: statusPatcherProvider, + jobs: jobs, + historic: historic, } } @@ -125,6 +132,23 @@ func (c *jobsConnector) Connect( } spec.Repository = name + // If a sync job is being created, we should update its status to pending. 
+ if spec.Pull != nil { + err = c.statusPatcherProvider.GetStatusPatcher().Patch(ctx, cfg, map[string]interface{}{ + "op": "replace", + "path": "/status/sync", + "value": &provisioning.SyncStatus{ + State: provisioning.JobStatePending, + LastRef: cfg.Status.Sync.LastRef, + Started: time.Now().UnixMilli(), + }, + }) + if err != nil { + responder.Error(err) + return + } + } + job, err := c.jobs.GetJobQueue().Insert(ctx, cfg.Namespace, spec) if err != nil { responder.Error(err) return diff --git a/pkg/registry/apis/provisioning/register.go b/pkg/registry/apis/provisioning/register.go index a63f42bb987..5590cbeafe5 100644 --- a/pkg/registry/apis/provisioning/register.go +++ b/pkg/registry/apis/provisioning/register.go @@ -92,6 +92,7 @@ type APIBuilder struct { allowedTargets []provisioning.SyncTargetType allowImageRendering bool + minSyncInterval time.Duration features featuremgmt.FeatureToggles usageStats usagestats.Service @@ -144,6 +145,7 @@ func NewAPIBuilder( allowedTargets []provisioning.SyncTargetType, restConfigGetter func(context.Context) (*clientrest.Config, error), allowImageRendering bool, + minSyncInterval time.Duration, registry prometheus.Registerer, newStandaloneClientFactoryFunc func(loopbackConfigProvider apiserver.RestConfigProvider) resources.ClientFactory, // optional, only used for standalone apiserver ) *APIBuilder { @@ -156,6 +158,11 @@ func NewAPIBuilder( parsers := resources.NewParserFactory(clients) resourceLister := resources.NewResourceListerForMigrations(unified, legacyMigrator, storageStatus) + // do not allow the min sync interval to be less than 10 seconds + if minSyncInterval <= 10*time.Second { + minSyncInterval = 10 * time.Second + } + b := &APIBuilder{ onlyApiServer: onlyApiServer, tracer: tracer, @@ -175,6 +182,7 @@ func NewAPIBuilder( allowedTargets: allowedTargets, restConfigGetter: restConfigGetter, allowImageRendering: allowImageRendering, + minSyncInterval: minSyncInterval, registry: registry, } @@ -261,6 +269,7 @@ func RegisterAPIService( allowedTargets, nil, // will use loopback instead cfg.ProvisioningAllowImageRendering, + cfg.ProvisioningMinSyncInterval, reg, nil, ) @@ -287,7 +296,7 @@ func (b *APIBuilder) GetAuthorizer() authorizer.Authorizer { Name: a.GetName(), Namespace: a.GetNamespace(), Subresource: a.GetSubresource(), - }) + }, "") if err != nil { return authorizer.DecisionDeny, "failed to perform authorization", err } @@ -485,7 +494,7 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI storage[provisioning.RepositoryResourceInfo.StoragePath("history")] = &historySubresource{ repoGetter: b, } - storage[provisioning.RepositoryResourceInfo.StoragePath("jobs")] = NewJobsConnector(b, b, jobHistory) + storage[provisioning.RepositoryResourceInfo.StoragePath("jobs")] = NewJobsConnector(b, b, b, jobHistory) // Add any extra storage for _, extra := range b.extras { @@ -587,6 +596,11 @@ func (b *APIBuilder) Validate(ctx context.Context, a admission.Attributes, o adm "sync target is not supported")) } + if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < int64(b.minSyncInterval.Seconds()) { + list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"), + cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", int64(b.minSyncInterval.Seconds())))) + } + if !b.allowImageRendering && cfg.Spec.GitHub != nil && cfg.Spec.GitHub.GenerateDashboardPreviews { list = append(list, field.Invalid(field.NewPath("spec", "generateDashboardPreviews"), diff --git
a/pkg/registry/apis/provisioning/register_validate_test.go b/pkg/registry/apis/provisioning/register_validate_test.go new file mode 100644 index 00000000000..0834b534cc0 --- /dev/null +++ b/pkg/registry/apis/provisioning/register_validate_test.go @@ -0,0 +1,114 @@ +package provisioning + +import ( + "context" + "testing" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + + "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestAPIBuilderValidate(t *testing.T) { + factory := repository.NewMockFactory(t) + mockRepo := repository.NewMockConfigRepository(t) + mockRepo.EXPECT().Validate().Return(nil) + factory.EXPECT().Build(mock.Anything, mock.Anything).Return(mockRepo, nil) + b := &APIBuilder{ + repoFactory: factory, + allowedTargets: []v0alpha1.SyncTargetType{v0alpha1.SyncTargetTypeFolder}, + allowImageRendering: false, + minSyncInterval: 30 * time.Second, + } + + t.Run("min sync interval is less than 10 seconds", func(t *testing.T) { + cfg := &v0alpha1.Repository{ + Spec: v0alpha1.RepositorySpec{ + Title: "repo", + Type: v0alpha1.GitHubRepositoryType, + Sync: v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeFolder, IntervalSeconds: 5}, + }, + } + mockRepo.EXPECT().Config().Return(cfg) + + obj := newRepoObj("repo1", "default", cfg.Spec, v0alpha1.RepositoryStatus{}) + err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil) + require.Error(t, err) + require.True(t, apierrors.IsInvalid(err)) + }) + + t.Run("image rendering is not enabled", func(t *testing.T) { + cfg2 := &v0alpha1.Repository{ + Spec: v0alpha1.RepositorySpec{ + Title: "repo", + Type: v0alpha1.GitHubRepositoryType, + Sync: v0alpha1.SyncOptions{Enabled: false, Target: v0alpha1.SyncTargetTypeFolder}, + GitHub: &v0alpha1.GitHubRepositoryConfig{URL: "https://github.com/acme/repo", Branch: "main", GenerateDashboardPreviews: true}, + }, + } + mockRepo.EXPECT().Config().Return(cfg2) + + obj := newRepoObj("repo2", "default", cfg2.Spec, v0alpha1.RepositoryStatus{}) + err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil) + require.Error(t, err) + require.True(t, apierrors.IsInvalid(err)) + }) + + t.Run("sync target is not supported", func(t *testing.T) { + cfg3 := &v0alpha1.Repository{ + Spec: v0alpha1.RepositorySpec{ + Title: "repo", + Type: v0alpha1.GitHubRepositoryType, + Sync: v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeInstance}, + }, + } + mockRepo.EXPECT().Config().Return(cfg3) + + obj := newRepoObj("repo3", "default", cfg3.Spec, v0alpha1.RepositoryStatus{}) + err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil) + require.Error(t, err) + require.True(t, apierrors.IsInvalid(err)) + }) +} + +func newRepoObj(name string, ns string, spec v0alpha1.RepositorySpec, status v0alpha1.RepositoryStatus) *v0alpha1.Repository { + return &v0alpha1.Repository{ + TypeMeta: metav1.TypeMeta{APIVersion: v0alpha1.APIVERSION, Kind: "Repository"}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: spec, + Status: status, + } +} + +func newAttributes(obj, old runtime.Object, op admission.Operation) admission.Attributes { + return 
admission.NewAttributesRecord( + obj, + old, + v0alpha1.RepositoryResourceInfo.GroupVersionKind(), + "default", + func() string { + if obj != nil { + return obj.(*v0alpha1.Repository).Name + } + if old != nil { + return old.(*v0alpha1.Repository).Name + } + return "" + }(), + v0alpha1.RepositoryResourceInfo.GroupVersionResource(), + "", + op, + nil, + false, + &user.DefaultInfo{}, + ) +} diff --git a/pkg/registry/apis/provisioning/resources/dualwriter.go b/pkg/registry/apis/provisioning/resources/dualwriter.go index b9af3416364..052771af700 100644 --- a/pkg/registry/apis/provisioning/resources/dualwriter.go +++ b/pkg/registry/apis/provisioning/resources/dualwriter.go @@ -515,9 +515,8 @@ func (r *DualReadWriter) authorize(ctx context.Context, parsed *ParsedResource, Resource: parsed.GVR.Resource, Namespace: id.GetNamespace(), Name: name, - Folder: parsed.Meta.GetFolder(), Verb: verb, - }) + }, parsed.Meta.GetFolder()) if err != nil || !rsp.Allowed { return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(), fmt.Errorf("no access to read the embedded file")) diff --git a/pkg/registry/apis/secret/inline/inline_secure_value.go b/pkg/registry/apis/secret/inline/inline_secure_value.go index 8ad810532c4..f6805938554 100644 --- a/pkg/registry/apis/secret/inline/inline_secure_value.go +++ b/pkg/registry/apis/secret/inline/inline_secure_value.go @@ -142,7 +142,7 @@ func (s *LocalInlineSecureValueService) canIdentityReadSecureValue(ctx context.C Resource: secretv1beta1.SecureValuesResourceInfo.GroupResource().Resource, Namespace: namespace.String(), Name: name, - }) + }, "") if err != nil { return fmt.Errorf("checking access for secure value %s: %w", name, err) } diff --git a/pkg/registry/backgroundsvcs/adapter/dependencies.go b/pkg/registry/backgroundsvcs/adapter/dependencies.go index 20e31754490..6256ebeda8a 100644 --- a/pkg/registry/backgroundsvcs/adapter/dependencies.go +++ b/pkg/registry/backgroundsvcs/adapter/dependencies.go @@ -3,9 +3,21 @@ package adapter import ( "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/modules" + "github.com/grafana/grafana/pkg/services/pluginsintegration/plugininstaller" + "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore" + "github.com/grafana/grafana/pkg/services/provisioning" ) const ( + // PluginStore is the module name for the plugin store service. + PluginStore = pluginstore.ServiceName + + // PluginInstaller is the module name for the plugin installer service. + PluginInstaller = plugininstaller.ServiceName + + // Provisioning is the module name for the provisioning service. + Provisioning = provisioning.ServiceName + // Tracing is the module name for the tracing service. 
Tracing = tracing.ServiceName @@ -29,7 +41,10 @@ func dependencyMap() map[string][]string { return map[string][]string{ Tracing: {}, GrafanaAPIServer: {Tracing}, - Core: {GrafanaAPIServer}, + PluginStore: {GrafanaAPIServer}, + PluginInstaller: {PluginStore}, + Provisioning: {PluginStore, PluginInstaller}, + Core: {GrafanaAPIServer, PluginStore, PluginInstaller, Provisioning}, BackgroundServices: {Core}, } } diff --git a/pkg/registry/backgroundsvcs/adapter/manager.go b/pkg/registry/backgroundsvcs/adapter/manager.go index edd84418bd8..8db7d9e9c2c 100644 --- a/pkg/registry/backgroundsvcs/adapter/manager.go +++ b/pkg/registry/backgroundsvcs/adapter/manager.go @@ -14,7 +14,7 @@ import ( ) var ( - stopTimeout = 30 * time.Second + stopTimeout = 5 * time.Second ) type ManagerAdapter struct { @@ -63,6 +63,7 @@ func (m *ManagerAdapter) starting(ctx context.Context) error { // skip disabled services if s, ok := bgSvc.(registry.CanBeDisabled); ok && s.IsDisabled() { logger.Debug("Skipping disabled service", "service", namedService.ServiceName()) + manager.RegisterInvisibleModule(namedService.ServiceName(), nil) continue } diff --git a/pkg/registry/backgroundsvcs/adapter/service.go b/pkg/registry/backgroundsvcs/adapter/service.go index 037d7f392b2..50bf24b4a2d 100644 --- a/pkg/registry/backgroundsvcs/adapter/service.go +++ b/pkg/registry/backgroundsvcs/adapter/service.go @@ -18,9 +18,10 @@ var _ services.NamedService = &serviceAdapter{} // The adapter uses dskit's BasicService with a custom RunningFn: // - Starting phase: No-op, transitions immediately to Running // - Running phase: Delegates to the wrapped service's Run method -// - Stopping phase: No-op, transitions immediately to Terminated/Failed +// - Stopping phase: Closes the stop channel to signal the service to stop type serviceAdapter struct { - *services.BasicService + services.NamedService + stopCh chan struct{} name string service registry.BackgroundService } @@ -36,8 +37,9 @@ func asNamedService(service registry.BackgroundService) *serviceAdapter { a := &serviceAdapter{ name: name, service: service, + stopCh: make(chan struct{}), } - a.BasicService = services.NewBasicService(nil, a.run, nil).WithName(name) + a.NamedService = services.NewBasicService(nil, a.running, a.stopping).WithName(name) return a } @@ -46,13 +48,24 @@ func asNamedService(service registry.BackgroundService) *serviceAdapter { // background service's Run method. If the background service completes without // error, the adapter waits for context cancellation (service stop) before // transitioning to Stopping state, ensuring proper dskit service lifecycle. -func (a *serviceAdapter) run(ctx context.Context) error { - err := a.service.Run(ctx) +func (a *serviceAdapter) running(ctx context.Context) error { + serviceCtx, serviceCancel := context.WithCancel(ctx) + go func() { + <-a.stopCh + serviceCancel() + }() + + err := a.service.Run(serviceCtx) if err != nil && !errors.Is(err, context.Canceled) { return err } // wait for context cancellation to transition to Stopping state. // this prevents the service from causing it's dependents to stop prematurely. 
- <-ctx.Done() + <-serviceCtx.Done() + return nil +} + +func (a *serviceAdapter) stopping(_ error) error { + close(a.stopCh) return nil } diff --git a/pkg/registry/backgroundsvcs/adapter/service_test.go b/pkg/registry/backgroundsvcs/adapter/service_test.go index 368c596d395..ed357d66443 100644 --- a/pkg/registry/backgroundsvcs/adapter/service_test.go +++ b/pkg/registry/backgroundsvcs/adapter/service_test.go @@ -16,7 +16,7 @@ func TestAsNamedService(t *testing.T) { adapter := asNamedService(mockSvc) require.NotNil(t, adapter) - require.NotNil(t, adapter.BasicService) + require.NotNil(t, adapter.NamedService) require.Equal(t, mockSvc, adapter.service) expectedName := reflect.TypeOf(mockSvc).String() diff --git a/pkg/server/module_server.go b/pkg/server/module_server.go index 0b477c3aa97..5c1517d354d 100644 --- a/pkg/server/module_server.go +++ b/pkg/server/module_server.go @@ -187,7 +187,7 @@ func (s *ModuleServer) Run() error { if err != nil { return nil, err } - return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig) + return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig, s.httpServerRouter) }) m.RegisterModule(modules.ZanzanaServer, func() (services.Service, error) { diff --git a/pkg/server/wire_gen.go b/pkg/server/wire_gen.go index 6b99759d59a..bcbb569df18 100644 --- a/pkg/server/wire_gen.go +++ b/pkg/server/wire_gen.go @@ -547,10 +547,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api } errorRegistry := pluginerrs.ProvideErrorTracker() loaderLoader := loader.ProvideService(pluginManagementCfg, discovery, bootstrap, validate, initialize, terminate, errorRegistry) - pluginstoreService, err := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) - if err != nil { - return nil, err - } + pluginstoreService := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) filestoreService := filestore.ProvideService(inMemory) fileStoreManager := dashboards.ProvideFileStoreManager(pluginstoreService, filestoreService) folderPermissionsService, err := ossaccesscontrol.ProvideFolderPermissions(cfg, featureToggles, routeRegisterImpl, sqlStore, accessControl, ossLicensingService, folderimplService, acimplService, teamService, userService, actionSetService) @@ -810,7 +807,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api apiService := api4.ProvideService(cfg, routeRegisterImpl, accessControl, userService, authinfoimplService, ossGroups, identitySynchronizer, orgService, ldapImpl, userAuthTokenService, bundleregistryService) dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService) snapshotsAPIBuilder := dashboardsnapshot.RegisterAPIService(serviceImpl, apiserverService, cfg, featureToggles, sqlStore, registerer) - dataSourceAPIBuilder, err := datasource.RegisterAPIService(featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, 
accessControl, registerer) + dataSourceAPIBuilder, err := datasource.RegisterAPIService(configProvider, featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, accessControl, registerer) if err != nil { return nil, err } @@ -1152,10 +1149,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac } errorRegistry := pluginerrs.ProvideErrorTracker() loaderLoader := loader.ProvideService(pluginManagementCfg, discovery, bootstrap, validate, initialize, terminate, errorRegistry) - pluginstoreService, err := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) - if err != nil { - return nil, err - } + pluginstoreService := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) filestoreService := filestore.ProvideService(inMemory) fileStoreManager := dashboards.ProvideFileStoreManager(pluginstoreService, filestoreService) folderPermissionsService, err := ossaccesscontrol.ProvideFolderPermissions(cfg, featureToggles, routeRegisterImpl, sqlStore, accessControl, ossLicensingService, folderimplService, acimplService, teamService, userService, actionSetService) @@ -1417,7 +1411,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac apiService := api4.ProvideService(cfg, routeRegisterImpl, accessControl, userService, authinfoimplService, ossGroups, identitySynchronizer, orgService, ldapImpl, userAuthTokenService, bundleregistryService) dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService) snapshotsAPIBuilder := dashboardsnapshot.RegisterAPIService(serviceImpl, apiserverService, cfg, featureToggles, sqlStore, registerer) - dataSourceAPIBuilder, err := datasource.RegisterAPIService(featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, accessControl, registerer) + dataSourceAPIBuilder, err := datasource.RegisterAPIService(configProvider, featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, accessControl, registerer) if err != nil { return nil, err } diff --git a/pkg/services/accesscontrol/authorizer.go b/pkg/services/accesscontrol/authorizer.go index a1bc224eae8..ac180fb2ac7 100644 --- a/pkg/services/accesscontrol/authorizer.go +++ b/pkg/services/accesscontrol/authorizer.go @@ -85,7 +85,7 @@ type LegacyAccessClient struct { opts map[string]ResourceAuthorizerOptions } -func (c *LegacyAccessClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest) (claims.CheckResponse, error) { +func (c *LegacyAccessClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest, folder string) (claims.CheckResponse, error) { ident, ok := id.(identity.Requester) if !ok { return claims.CheckResponse{}, errors.New("expected identity.Requester for legacy access control") @@ -140,6 +140,9 @@ func (c *LegacyAccessClient) Check(ctx context.Context, id claims.AuthInfo, req return claims.CheckResponse{}, err } + // NOTE: folder is looked up again in the evaluator: + // pkg/services/accesscontrol/acimpl/accesscontrol.go#L77 + return claims.CheckResponse{Allowed: 
allowed}, nil } diff --git a/pkg/services/accesscontrol/authorizer_test.go b/pkg/services/accesscontrol/authorizer_test.go index 515186f2a57..04853bdd7ee 100644 --- a/pkg/services/accesscontrol/authorizer_test.go +++ b/pkg/services/accesscontrol/authorizer_test.go @@ -24,7 +24,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Resource: "dashboards", Namespace: "default", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, false, res.Allowed) }) @@ -47,7 +47,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, false, res.Allowed) @@ -70,7 +70,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Verb: "list", Namespace: "default", Resource: "dashboards", - }) + }, "") assert.NoError(t, err) assert.Equal(t, true, res.Allowed) @@ -94,7 +94,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, true, res.Allowed) @@ -119,7 +119,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, true, res.Allowed) @@ -129,7 +129,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, false, res.Allowed) diff --git a/pkg/services/apiserver/auth/authorizer/resource.go b/pkg/services/apiserver/auth/authorizer/resource.go index 96b8e8dd4cb..b86f6b40f09 100644 --- a/pkg/services/apiserver/auth/authorizer/resource.go +++ b/pkg/services/apiserver/auth/authorizer/resource.go @@ -36,7 +36,7 @@ func (r ResourceAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri Name: attr.GetName(), Subresource: attr.GetSubresource(), Path: attr.GetPath(), - }) + }, "") // NOTE: we do not know the folder in this context if err != nil { return authorizer.DecisionDeny, "", err diff --git a/pkg/services/apiserver/service.go b/pkg/services/apiserver/service.go index 69381a2bc99..86706dcbb48 100644 --- a/pkg/services/apiserver/service.go +++ b/pkg/services/apiserver/service.go @@ -85,7 +85,6 @@ type service struct { features featuremgmt.FeatureToggles log log.Logger - stopCh chan struct{} stoppedCh chan error db db.DB @@ -148,7 +147,6 @@ func ProvideService( cfg: cfg, features: features, rr: rr, - stopCh: make(chan struct{}), builders: []builder.APIGroupBuilder{}, authorizer: authorizer.NewGrafanaBuiltInSTAuthorizer(cfg), tracing: tracing, @@ -242,11 +240,8 @@ func (s *service) Run(ctx context.Context) error { if err := s.StartAsync(ctx); err != nil { return err } - - if err := s.AwaitRunning(ctx); err != nil { - return err - } - return s.AwaitTerminated(ctx) + stopCtx := context.Background() + return s.AwaitTerminated(stopCtx) } func (s *service) RegisterAPI(b builder.APIGroupBuilder) { diff --git a/pkg/services/authz/zanzana/client/client.go b/pkg/services/authz/zanzana/client/client.go index 47b463a2e07..05266afba83 100644 --- a/pkg/services/authz/zanzana/client/client.go +++ b/pkg/services/authz/zanzana/client/client.go @@ -3,11 +3,12 @@ package client import ( "context" + "go.opentelemetry.io/otel" + "google.golang.org/grpc" + authzlib "github.com/grafana/authlib/authz" authzv1 "github.com/grafana/authlib/authz/proto/v1" authlib "github.com/grafana/authlib/types" - "go.opentelemetry.io/otel" - "google.golang.org/grpc" "github.com/grafana/grafana/pkg/infra/log" authzextv1 
"github.com/grafana/grafana/pkg/services/authz/proto/v1" @@ -36,11 +37,11 @@ func New(cc grpc.ClientConnInterface) (*Client, error) { return c, nil } -func (c *Client) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (c *Client) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Check") defer span.End() - return c.authzlibclient.Check(ctx, id, req) + return c.authzlibclient.Check(ctx, id, req, folder) } func (c *Client) Compile(ctx context.Context, id authlib.AuthInfo, req authlib.ListRequest) (authlib.ItemChecker, authlib.Zookie, error) { diff --git a/pkg/services/authz/zanzana/client/noop.go b/pkg/services/authz/zanzana/client/noop.go index 26fbbab573b..419a9b73201 100644 --- a/pkg/services/authz/zanzana/client/noop.go +++ b/pkg/services/authz/zanzana/client/noop.go @@ -16,7 +16,7 @@ func NewNoop() *NoopClient { type NoopClient struct{} -func (nc *NoopClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (nc *NoopClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { return authlib.CheckResponse{}, nil } diff --git a/pkg/services/authz/zanzana/client/shadow_client.go b/pkg/services/authz/zanzana/client/shadow_client.go index e2ba4e01c9c..4fb4c545ff0 100644 --- a/pkg/services/authz/zanzana/client/shadow_client.go +++ b/pkg/services/authz/zanzana/client/shadow_client.go @@ -3,9 +3,9 @@ package client import ( "context" - authlib "github.com/grafana/authlib/types" "github.com/prometheus/client_golang/prometheus" + authlib "github.com/grafana/authlib/types" "github.com/grafana/grafana/pkg/infra/log" ) @@ -29,7 +29,7 @@ func WithShadowClient(accessClient authlib.AccessClient, zanzanaClient authlib.A return client } -func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { acResChan := make(chan authlib.CheckResponse, 1) acErrChan := make(chan error, 1) @@ -42,7 +42,7 @@ func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authl defer timer.ObserveDuration() zanzanaCtx := context.WithoutCancel(ctx) - res, err := c.zanzanaClient.Check(zanzanaCtx, id, req) + res, err := c.zanzanaClient.Check(zanzanaCtx, id, req, folder) if err != nil { c.logger.Error("Failed to run zanzana check", "error", err) } @@ -61,7 +61,7 @@ func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authl }() timer := prometheus.NewTimer(c.metrics.evaluationsSeconds.WithLabelValues("rbac")) - res, err := c.accessClient.Check(ctx, id, req) + res, err := c.accessClient.Check(ctx, id, req, folder) timer.ObserveDuration() acResChan <- res acErrChan <- err diff --git a/pkg/services/authz/zanzana/zanzana.go b/pkg/services/authz/zanzana/zanzana.go index adf2dbab531..d7d1b1dfd45 100644 --- a/pkg/services/authz/zanzana/zanzana.go +++ b/pkg/services/authz/zanzana/zanzana.go @@ -7,7 +7,6 @@ import ( openfgav1 "github.com/openfga/api/proto/openfga/v1" authlib "github.com/grafana/authlib/types" - "github.com/grafana/grafana/pkg/services/authz/zanzana/common" ) @@ -123,7 +122,7 @@ func MergeFolderResourceTuples(a, b *openfgav1.TupleKey) { 
va.GetListValue().Values = append(va.GetListValue().Values, vb.GetListValue().Values...) } -func TranslateToCheckRequest(namespace, action, kind, folder, name string) (*authlib.CheckRequest, bool) { +func TranslateToCheckRequest(namespace, action, kind, name string) (*authlib.CheckRequest, bool) { translation, ok := resourceTranslations[kind] if !ok { @@ -146,7 +145,6 @@ func TranslateToCheckRequest(namespace, action, kind, folder, name string) (*aut Group: translation.group, Resource: translation.resource, Name: name, - Folder: folder, } return req, true diff --git a/pkg/services/dashboards/database/database.go b/pkg/services/dashboards/database/database.go index b6bfc346f51..e2909cbe46f 100644 --- a/pkg/services/dashboards/database/database.go +++ b/pkg/services/dashboards/database/database.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/services/dashboards" dashver "github.com/grafana/grafana/pkg/services/dashboardversion" "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/services/folder" "github.com/grafana/grafana/pkg/services/libraryelements/model" "github.com/grafana/grafana/pkg/services/quota" "github.com/grafana/grafana/pkg/services/sqlstore" @@ -96,6 +97,7 @@ func (d *dashboardStore) GetDashboardsByLibraryPanelUID(ctx context.Context, lib return connectedDashboards, err } +// nolint:gocyclo func (d *dashboardStore) ValidateDashboardBeforeSave(ctx context.Context, dash *dashboards.Dashboard, overwrite bool) (bool, error) { ctx, span := tracer.Start(ctx, "dashboards.database.ValidateDashboardBeforesave") defer span.End() @@ -107,7 +109,7 @@ func (d *dashboardStore) ValidateDashboardBeforeSave(ctx context.Context, dash * // we don't save FolderID in kubernetes object when saving through k8s // this block guarantees we save dashboards with folder_id and folder_uid in those cases - if !dash.IsFolder && dash.FolderUID != "" && dash.FolderID == 0 { // nolint:staticcheck + if !dash.IsFolder && dash.FolderUID != "" && dash.FolderID == 0 && dash.FolderUID != folder.GeneralFolderUID { // nolint:staticcheck var existing dashboards.Dashboard folderIdFound, err := sess.Where("uid=? 
AND org_id=?", dash.FolderUID, dash.OrgID).Get(&existing) if err != nil { diff --git a/pkg/services/featuremgmt/registry.go b/pkg/services/featuremgmt/registry.go index 9e368a4bc5c..54c66da34bc 100644 --- a/pkg/services/featuremgmt/registry.go +++ b/pkg/services/featuremgmt/registry.go @@ -1520,6 +1520,16 @@ var ( FrontendOnly: true, HideFromDocs: true, }, + { + Name: "alertingUseNewSimplifiedRoutingHashAlgorithm", + Description: "", + Stage: FeatureStagePublicPreview, + Owner: grafanaAlertingSquad, + HideFromAdminPage: true, + HideFromDocs: true, + RequiresRestart: true, + Expression: "true", + }, { Name: "useScopesNavigationEndpoint", Description: "Use the scopes navigation endpoint instead of the dashboardbindings endpoint", diff --git a/pkg/services/featuremgmt/toggles_gen.csv b/pkg/services/featuremgmt/toggles_gen.csv index 4e489bf84f9..b453c49da28 100644 --- a/pkg/services/featuremgmt/toggles_gen.csv +++ b/pkg/services/featuremgmt/toggles_gen.csv @@ -198,6 +198,7 @@ fetchRulesUsingPost,experimental,@grafana/alerting-squad,false,false,false newLogsPanel,experimental,@grafana/observability-logs,false,false,true grafanaconThemes,GA,@grafana/grafana-frontend-platform,false,true,false alertingJiraIntegration,experimental,@grafana/alerting-squad,false,false,true +alertingUseNewSimplifiedRoutingHashAlgorithm,preview,@grafana/alerting-squad,false,true,false useScopesNavigationEndpoint,experimental,@grafana/grafana-frontend-platform,false,false,true scopeSearchAllLevels,experimental,@grafana/grafana-frontend-platform,false,false,false alertingRuleVersionHistoryRestore,GA,@grafana/alerting-squad,false,false,true diff --git a/pkg/services/featuremgmt/toggles_gen.go b/pkg/services/featuremgmt/toggles_gen.go index 097cd66ab45..ba8b131f74f 100644 --- a/pkg/services/featuremgmt/toggles_gen.go +++ b/pkg/services/featuremgmt/toggles_gen.go @@ -803,6 +803,9 @@ const ( // Enables the new Jira integration for contact points in cloud alert managers. 
FlagAlertingJiraIntegration = "alertingJiraIntegration" + // FlagAlertingUseNewSimplifiedRoutingHashAlgorithm + FlagAlertingUseNewSimplifiedRoutingHashAlgorithm = "alertingUseNewSimplifiedRoutingHashAlgorithm" + // FlagUseScopesNavigationEndpoint // Use the scopes navigation endpoint instead of the dashboardbindings endpoint FlagUseScopesNavigationEndpoint = "useScopesNavigationEndpoint" diff --git a/pkg/services/featuremgmt/toggles_gen.json b/pkg/services/featuremgmt/toggles_gen.json index 7e32061c9d5..f2995c233b0 100644 --- a/pkg/services/featuremgmt/toggles_gen.json +++ b/pkg/services/featuremgmt/toggles_gen.json @@ -610,6 +610,46 @@ "expression": "true" } }, + { + "metadata": { + "name": "alertingUseNewSimplifiedRoutingHashAlgorithm", + "resourceVersion": "1759339813575", + "creationTimestamp": "2025-10-01T17:28:42Z", + "deletionTimestamp": "2025-10-01T17:29:29Z", + "annotations": { + "grafana.app/updatedTimestamp": "2025-10-01 17:30:13.575464 +0000 UTC" + } + }, + "spec": { + "description": "", + "stage": "preview", + "codeowner": "@grafana/alerting-squad", + "requiresRestart": true, + "hideFromAdminPage": true, + "hideFromDocs": true, + "expression": "true" + } + }, + { + "metadata": { + "name": "alertingUseOldSimplifiedRoutingHashAlgorithm", + "resourceVersion": "1759339782639", + "creationTimestamp": "2025-10-01T17:29:29Z", + "deletionTimestamp": "2025-10-01T17:30:13Z", + "annotations": { + "grafana.app/updatedTimestamp": "2025-10-01 17:29:42.63941 +0000 UTC" + } + }, + "spec": { + "description": "", + "stage": "deprecated", + "codeowner": "@grafana/alerting-squad", + "requiresRestart": true, + "hideFromAdminPage": true, + "hideFromDocs": true, + "expression": "false" + } + }, { "metadata": { "name": "alertmanagerRemotePrimary", diff --git a/pkg/services/navtree/models.go b/pkg/services/navtree/models.go index 51bbdf261b1..79b633515c1 100644 --- a/pkg/services/navtree/models.go +++ b/pkg/services/navtree/models.go @@ -48,7 +48,6 @@ const ( NavIDAlerting = "alerting" NavIDObservability = "observability" NavIDInfrastructure = "infrastructure" - NavIDFrontend = "frontend" NavIDReporting = "reports" NavIDApps = "apps" NavIDCfgGeneral = "cfg/general" diff --git a/pkg/services/navtree/navtreeimpl/applinks.go b/pkg/services/navtree/navtreeimpl/applinks.go index 5ae119d97ff..27a31a5bcff 100644 --- a/pkg/services/navtree/navtreeimpl/applinks.go +++ b/pkg/services/navtree/navtreeimpl/applinks.go @@ -260,10 +260,21 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n } } + sectionChildren := []*navtree.NavLink{appLink} + // Asserts pages expand into the root Observability section instead of the app's own node + if plugin.ID == "grafana-asserts-app" { + sectionChildren = appLink.Children + + // keep the current sorting of the pages, but place them above all the other apps + for _, child := range sectionChildren { + child.SortWeight = -100 + child.SortWeight + } + } + if sectionID == navtree.NavIDRoot { treeRoot.AddSection(appLink) } else if navNode := treeRoot.FindById(sectionID); navNode != nil { - navNode.Children = append(navNode.Children, appLink) + navNode.Children = append(navNode.Children, sectionChildren...)
} else { switch sectionID { case navtree.NavIDApps: @@ -272,18 +283,19 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n Icon: "layer-group", SubTitle: "App plugins that extend the Grafana experience", Id: navtree.NavIDApps, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, SortWeight: navtree.WeightApps, Url: s.cfg.AppSubURL + "/apps", }) case navtree.NavIDObservability: + treeRoot.AddSection(&navtree.NavLink{ Text: "Observability", Id: navtree.NavIDObservability, SubTitle: "Monitor infrastructure and applications in real time with Grafana Cloud's fully managed observability suite", Icon: "heart-rate", SortWeight: navtree.WeightObservability, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, Url: s.cfg.AppSubURL + "/observability", }) case navtree.NavIDInfrastructure: @@ -293,19 +305,9 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n SubTitle: "Understand your infrastructure's health", Icon: "heart-rate", SortWeight: navtree.WeightInfrastructure, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, Url: s.cfg.AppSubURL + "/infrastructure", }) - case navtree.NavIDFrontend: - treeRoot.AddSection(&navtree.NavLink{ - Text: "Frontend", - Id: navtree.NavIDFrontend, - SubTitle: "Gain real user monitoring insights", - Icon: "frontend-observability", - SortWeight: navtree.WeightFrontend, - Children: []*navtree.NavLink{appLink}, - Url: s.cfg.AppSubURL + "/frontend", - }) case navtree.NavIDAlertsAndIncidents: alertsAndIncidentsChildren := []*navtree.NavLink{} for _, alertingNode := range alertingNodes { @@ -332,7 +334,7 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n SubTitle: "Optimize performance with k6 and Synthetic Monitoring insights", Icon: "k6", SortWeight: navtree.WeightTestingAndSynthetics, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, Url: s.cfg.AppSubURL + "/testing-and-synthetics", }) case navtree.NavIDAdaptiveTelemetry: @@ -372,11 +374,11 @@ func (s *ServiceImpl) hasAccessToInclude(c *contextmodel.ReqContext, pluginID st func (s *ServiceImpl) readNavigationSettings() { s.navigationAppConfig = map[string]NavigationAppConfig{ "grafana-asserts-app": {SectionID: navtree.NavIDObservability, SortWeight: 1, Icon: "asserts"}, - "grafana-app-observability-app": {SectionID: navtree.NavIDObservability, SortWeight: 2, Text: "Application"}, - "grafana-csp-app": {SectionID: navtree.NavIDObservability, SortWeight: 3, Icon: "cloud-provider"}, - "grafana-k8s-app": {SectionID: navtree.NavIDObservability, SortWeight: 4, Text: "Kubernetes"}, - "grafana-dbo11y-app": {SectionID: navtree.NavIDObservability, SortWeight: 5, Text: "Database"}, - "grafana-kowalski-app": {SectionID: navtree.NavIDObservability, SortWeight: 6, Text: "Frontend"}, + "grafana-kowalski-app": {SectionID: navtree.NavIDObservability, SortWeight: 2, Text: "Frontend"}, + "grafana-app-observability-app": {SectionID: navtree.NavIDObservability, SortWeight: 3, Text: "Application"}, + "grafana-dbo11y-app": {SectionID: navtree.NavIDObservability, SortWeight: 4, Text: "Database"}, + "grafana-k8s-app": {SectionID: navtree.NavIDObservability, SortWeight: 5, Text: "Kubernetes"}, + "grafana-csp-app": {SectionID: navtree.NavIDObservability, SortWeight: 6, Icon: "cloud-provider"}, "grafana-metricsdrilldown-app": {SectionID: navtree.NavIDDrilldown, SortWeight: 1, Text: "Metrics"}, "grafana-lokiexplore-app": {SectionID: navtree.NavIDDrilldown, SortWeight: 2, Text: 
"Logs"}, "grafana-exploretraces-app": {SectionID: navtree.NavIDDrilldown, SortWeight: 3, Text: "Traces"}, @@ -390,10 +392,10 @@ func (s *ServiceImpl) readNavigationSettings() { "grafana-slo-app": {SectionID: navtree.NavIDAlertsAndIncidents, SortWeight: 7}, "grafana-cloud-link-app": {SectionID: navtree.NavIDCfgPlugins, SortWeight: 3}, "grafana-costmanagementui-app": {SectionID: navtree.NavIDCfg, Text: "Cost management"}, - "grafana-adaptive-metrics-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 1, Text: "Adaptive Metrics", SubTitle: "Analyzes and reduces unused metrics and cardinality to help you focus on your most valuable performance data."}, - "grafana-adaptivelogs-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 2, Text: "Adaptive Logs", SubTitle: "Analyzes log patterns to drop repetitive lines and accelerate troubleshooting."}, - "grafana-adaptivetraces-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 3, Text: "Adaptive Traces", SubTitle: "Analyzes and retains the most valuable traces, providing the performance insights needed to resolve issues faster."}, - "grafana-adaptiveprofiles-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 4, Text: "Adaptive Profiles", SubTitle: "Analyzes application profiles to pinpoint the root cause of performance issues and accelerate resolution."}, + "grafana-adaptive-metrics-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 1}, + "grafana-adaptivelogs-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 2}, + "grafana-adaptivetraces-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 3}, + "grafana-adaptiveprofiles-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 4}, "grafana-attributions-app": {SectionID: navtree.NavIDCfg, Text: "Attributions"}, "grafana-logvolumeexplorer-app": {SectionID: navtree.NavIDCfg, Text: "Log Volume Explorer"}, "grafana-easystart-app": {SectionID: navtree.NavIDRoot, SortWeight: navtree.WeightApps + 1, Text: "Connections", Icon: "adjust-circle"}, diff --git a/pkg/services/navtree/navtreeimpl/applinks_test.go b/pkg/services/navtree/navtreeimpl/applinks_test.go index 87b0276f370..53c06601900 100644 --- a/pkg/services/navtree/navtreeimpl/applinks_test.go +++ b/pkg/services/navtree/navtreeimpl/applinks_test.go @@ -387,7 +387,7 @@ func TestReadingNavigationSettings(t *testing.T) { require.Equal(t, "dashboards", service.navigationAppConfig["grafana-k8s-app"].SectionID) require.Equal(t, "admin", service.navigationAppConfig["other-app"].SectionID) - require.Equal(t, int64(4), service.navigationAppConfig["grafana-k8s-app"].SortWeight) + require.Equal(t, int64(5), service.navigationAppConfig["grafana-k8s-app"].SortWeight) require.Equal(t, int64(12), service.navigationAppConfig["other-app"].SortWeight) require.Equal(t, "admin", service.navigationAppPathConfig["/a/grafana-k8s-app/foo"].SectionID) diff --git a/pkg/services/ngalert/api/api_alertmanager_test.go b/pkg/services/ngalert/api/api_alertmanager_test.go index bbe1df17155..433c4d173f6 100644 --- a/pkg/services/ngalert/api/api_alertmanager_test.go +++ b/pkg/services/ngalert/api/api_alertmanager_test.go @@ -291,7 +291,8 @@ func TestAlertmanagerAutogenConfig(t *testing.T) { 1: {AlertmanagerConfiguration: validConfig, OrgID: 1}, 2: {AlertmanagerConfiguration: validConfigWithoutAutogen, OrgID: 2}, } - sut.mam = createMultiOrgAlertmanager(t, configs) + ft := featuremgmt.WithFeatures(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm) + sut.mam = createMultiOrgAlertmanager(t, configs, 
withAMFeatureToggles(ft)) return sut, configs } @@ -577,9 +578,29 @@ func createSut(t *testing.T) AlertmanagerSrv { } } -func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration) *notifier.MultiOrgAlertmanager { +type createMultiOrgAMOptions struct { + featureToggles featuremgmt.FeatureToggles +} + +type createMultiOrgAMOptionsFunc func(*createMultiOrgAMOptions) + +func withAMFeatureToggles(toggles featuremgmt.FeatureToggles) createMultiOrgAMOptionsFunc { + return func(opts *createMultiOrgAMOptions) { + opts.featureToggles = toggles + } +} + +func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration, opts ...createMultiOrgAMOptionsFunc) *notifier.MultiOrgAlertmanager { t.Helper() + options := createMultiOrgAMOptions{ + featureToggles: featuremgmt.WithFeatures(), + } + + for _, opt := range opts { + opt(&options) + } + configStore := notifier.NewFakeConfigStore(t, configs) orgStore := notifier.NewFakeOrgStore(t, []int64{1, 2, 3}) provStore := ngfakes.NewFakeProvisioningStore() @@ -610,7 +631,7 @@ func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertC ngfakes.NewFakeReceiverPermissionsService(), log.New("testlogger"), secretsService, - featuremgmt.WithManager(), + options.featureToggles, nil, ) require.NoError(t, err) diff --git a/pkg/services/ngalert/api/api_testing.go b/pkg/services/ngalert/api/api_testing.go index e26f2a89f9a..ceda51a7a22 100644 --- a/pkg/services/ngalert/api/api_testing.go +++ b/pkg/services/ngalert/api/api_testing.go @@ -113,7 +113,7 @@ func (srv TestingApiSrv) RouteTestGrafanaRuleConfig(c *contextmodel.ReqContext, now, rule, results, - state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder), + state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder, srv.featureManager), nil, ) diff --git a/pkg/services/ngalert/models/notifications.go b/pkg/services/ngalert/models/notifications.go index 36e4df6ae0d..609ee724085 100644 --- a/pkg/services/ngalert/models/notifications.go +++ b/pkg/services/ngalert/models/notifications.go @@ -8,6 +8,7 @@ import ( "unsafe" "github.com/grafana/grafana-plugin-sdk-go/data" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/prometheus/common/model" ) @@ -102,12 +103,12 @@ func (s *NotificationSettings) Validate() error { // - AutogeneratedRouteLabel: "true" // - AutogeneratedRouteReceiverNameLabel: Receiver // - AutogeneratedRouteSettingsHashLabel: Fingerprint (if the NotificationSettings are not all default) -func (s *NotificationSettings) ToLabels() data.Labels { +func (s *NotificationSettings) ToLabels(features featuremgmt.FeatureToggles) data.Labels { result := make(data.Labels, 3) result[AutogeneratedRouteLabel] = "true" result[AutogeneratedRouteReceiverNameLabel] = s.Receiver if !s.IsAllDefault() { - result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint().String() + result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint(features).String() } return result } @@ -160,7 +161,7 @@ func NewDefaultNotificationSettings(receiver string) NotificationSettings { // Fingerprint calculates a hash value to uniquely identify a NotificationSettings by its attributes. // The hash is calculated by concatenating the strings and durations of the NotificationSettings attributes // and using an invalid UTF-8 sequence as a separator. 
-func (s *NotificationSettings) Fingerprint() data.Fingerprint { +func (s *NotificationSettings) Fingerprint(features featuremgmt.FeatureToggles) data.Fingerprint { h := fnv.New64() tmp := make([]byte, 8) @@ -192,7 +193,10 @@ func (s *NotificationSettings) Fingerprint() data.Fingerprint { } // Add a separator between the time intervals to avoid collisions // when all settings are the same including interval names except for the interval type (mute vs active). - _, _ = h.Write([]byte{255}) + // Use new algorithm by default, unless feature flag is explicitly disabled + if features == nil || (features != nil && features.IsEnabledGlobally(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm)) { + _, _ = h.Write([]byte{255}) + } for _, interval := range s.ActiveTimeIntervals { writeString(interval) } diff --git a/pkg/services/ngalert/models/notifications_test.go b/pkg/services/ngalert/models/notifications_test.go index 2e7df039895..1bacbe403e7 100644 --- a/pkg/services/ngalert/models/notifications_test.go +++ b/pkg/services/ngalert/models/notifications_test.go @@ -195,7 +195,7 @@ func TestNotificationSettingsLabels(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - labels := tt.notificationSettings.ToLabels() + labels := tt.notificationSettings.ToLabels(nil) require.Equal(t, tt.labels, labels) }) } @@ -219,7 +219,7 @@ func TestNotificationSettings_TimeIntervals(t *testing.T) { ActiveTimeIntervals: []string{timeInterval}, } - require.NotEqual(t, activeSettings.Fingerprint(), muteSettings.Fingerprint()) + require.NotEqual(t, activeSettings.Fingerprint(nil), muteSettings.Fingerprint(nil)) } func TestNormalizedGroupBy(t *testing.T) { diff --git a/pkg/services/ngalert/ngalert.go b/pkg/services/ngalert/ngalert.go index 2f18840d50a..a014048a46a 100644 --- a/pkg/services/ngalert/ngalert.go +++ b/pkg/services/ngalert/ngalert.go @@ -220,7 +220,7 @@ func (ng *AlertNG) init() error { Timeout: ng.Cfg.UnifiedAlerting.RemoteAlertmanager.Timeout, } autogenFn := func(ctx context.Context, logger log.Logger, orgID int64, cfg *definitions.PostableApiAlertingConfig, skipInvalid bool) error { - return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid) + return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid, ng.FeatureToggles) } // This function will be used by the MOA to create new Alertmanagers. diff --git a/pkg/services/ngalert/notifier/alertmanager.go b/pkg/services/ngalert/notifier/alertmanager.go index 8c833c14a2b..06c5b5e0cce 100644 --- a/pkg/services/ngalert/notifier/alertmanager.go +++ b/pkg/services/ngalert/notifier/alertmanager.go @@ -56,6 +56,7 @@ type alertmanager struct { DefaultConfiguration string decryptFn alertingNotify.GetDecryptedValueFn crypto Crypto + features featuremgmt.FeatureToggles } // maintenanceOptions represent the options for components that need maintenance on a frequency within the Alertmanager. @@ -155,6 +156,7 @@ func NewAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A logger: l.New("component", "alertmanager", opts.TenantKey, opts.TenantID), // similar to what the base does decryptFn: decryptFn, crypto: crypto, + features: featureToggles, } return am, nil @@ -344,7 +346,7 @@ func (am *alertmanager) applyConfig(ctx context.Context, cfg *apimodels.Postable templates := alertingNotify.PostableAPITemplatesToTemplateDefinitions(cfg.GetMergedTemplateDefinitions()) // Now add autogenerated config to the route. 
- err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid) + err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid, am.features) if err != nil { return false, err } diff --git a/pkg/services/ngalert/notifier/alertmanager_config.go b/pkg/services/ngalert/notifier/alertmanager_config.go index 25a24cb5441..9f101621ef8 100644 --- a/pkg/services/ngalert/notifier/alertmanager_config.go +++ b/pkg/services/ngalert/notifier/alertmanager_config.go @@ -133,7 +133,7 @@ func (moa *MultiOrgAlertmanager) GetAlertmanagerConfiguration(ctx context.Contex // Otherwise, broken settings (e.g. a receiver that doesn't exist) will cause the config returned here to be // different than the config currently in-use. // TODO: Preferably, we'd be getting the config directly from the in-memory AM so adding the autogen config would not be necessary. - err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true) + err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true, moa.featureManager) if err != nil { return definitions.GettableUserConfig{}, err } diff --git a/pkg/services/ngalert/notifier/autogen_alertmanager.go b/pkg/services/ngalert/notifier/autogen_alertmanager.go index 55f0201fcb3..7523e368ad5 100644 --- a/pkg/services/ngalert/notifier/autogen_alertmanager.go +++ b/pkg/services/ngalert/notifier/autogen_alertmanager.go @@ -12,6 +12,7 @@ import ( "golang.org/x/exp/maps" "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" ) @@ -22,8 +23,8 @@ type autogenRuleStore interface { // AddAutogenConfig creates the autogenerated configuration and adds it to the given apiAlertingConfig. // If skipInvalid is true, then invalid notification settings are skipped, otherwise an error is returned. -func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) error { - autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid) +func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) error { + autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid, features) if err != nil { return err } @@ -39,7 +40,7 @@ func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store // newAutogeneratedRoute creates a new autogenerated route based on the notification settings for the given org. // cfg is used to construct the settings validator and to ensure we create a dedicated route for each receiver. // skipInvalid is used to skip invalid settings instead of returning an error. 
-func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) (autogeneratedRoute, error) { +func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) (autogeneratedRoute, error) { settings, err := store.ListNotificationSettings(ctx, models.ListNotificationSettingsQuery{OrgID: orgId}) if err != nil { return autogeneratedRoute{}, fmt.Errorf("failed to list alert rules: %w", err) @@ -50,7 +51,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s // contact point even if no rules are using it. This will prevent race conditions between AM sync and rule sync. for _, receiver := range cfg.GetReceivers() { setting := models.NewDefaultNotificationSettings(receiver.GetName()) - fp := setting.Fingerprint() + fp := setting.Fingerprint(features) notificationSettings[fp] = setting } @@ -65,7 +66,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s } return autogeneratedRoute{}, fmt.Errorf("invalid notification settings for rule %s: %w", ruleKey.UID, err) } - fp := setting.Fingerprint() + fp := setting.Fingerprint(features) // Keep only unique settings. if _, ok := notificationSettings[fp]; ok { continue diff --git a/pkg/services/ngalert/notifier/autogen_alertmanager_test.go b/pkg/services/ngalert/notifier/autogen_alertmanager_test.go index 78239c06479..bf376502caa 100644 --- a/pkg/services/ngalert/notifier/autogen_alertmanager_test.go +++ b/pkg/services/ngalert/notifier/autogen_alertmanager_test.go @@ -290,7 +290,7 @@ func TestAddAutogenConfig(t *testing.T) { store.notificationSettings[orgId][models.AlertRuleKey{OrgID: orgId, UID: util.GenerateShortUID()}] = []models.NotificationSettings{setting} } - err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid) + err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid, nil) if tt.expErrorContains != "" { require.Error(t, err) require.ErrorContains(t, err, tt.expErrorContains) diff --git a/pkg/services/ngalert/schedule/alert_rule.go b/pkg/services/ngalert/schedule/alert_rule.go index 523dcf0d86c..3991df89e77 100644 --- a/pkg/services/ngalert/schedule/alert_rule.go +++ b/pkg/services/ngalert/schedule/alert_rule.go @@ -471,7 +471,7 @@ func (a *alertRule) evaluate(ctx context.Context, e *Evaluation, span trace.Span e.scheduledAt, e.rule, results, - state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder), + state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder, a.featureToggles), func(ctx context.Context, statesToSend state.StateTransitions) { start := a.clock.Now() alerts := a.send(ctx, logger, statesToSend) diff --git a/pkg/services/ngalert/schedule/alert_rule_test.go b/pkg/services/ngalert/schedule/alert_rule_test.go index 5e301c5aa08..48be6bb4a40 100644 --- a/pkg/services/ngalert/schedule/alert_rule_test.go +++ b/pkg/services/ngalert/schedule/alert_rule_test.go @@ -1317,7 +1317,7 @@ func stateForRule(rule *models.AlertRule, ts time.Time, evalState eval.State) *s for k, v := range rule.Labels { s.Labels[k] = v } - for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true) { + for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true, featuremgmt.WithFeatures()) { if _, 
ok := s.Labels[k]; !ok { s.Labels[k] = v } diff --git a/pkg/services/ngalert/schedule/registry.go b/pkg/services/ngalert/schedule/registry.go index 240c62bdc40..ede71c94d75 100644 --- a/pkg/services/ngalert/schedule/registry.go +++ b/pkg/services/ngalert/schedule/registry.go @@ -304,7 +304,7 @@ func (r ruleWithFolder) Fingerprint() fingerprint { } for _, setting := range rule.NotificationSettings { - binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint())) + binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint(nil))) writeBytes(tmp) } diff --git a/pkg/services/ngalert/state/state.go b/pkg/services/ngalert/state/state.go index db807953b13..9df04681353 100644 --- a/pkg/services/ngalert/state/state.go +++ b/pkg/services/ngalert/state/state.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/apimachinery/errutil" "github.com/grafana/grafana/pkg/expr" "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ngalert/eval" "github.com/grafana/grafana/pkg/services/ngalert/models" "github.com/grafana/grafana/pkg/services/screenshot" @@ -753,7 +754,7 @@ func ParseFormattedState(stateStr string) (eval.State, string, error) { } // GetRuleExtraLabels returns a map of built-in labels that should be added to an alert before it is sent to the Alertmanager or its state is cached. -func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool) map[string]string { +func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool, features featuremgmt.FeatureToggles) map[string]string { extraLabels := make(map[string]string, 4) extraLabels[alertingModels.NamespaceUIDLabel] = rule.NamespaceUID @@ -771,7 +772,7 @@ func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string ignored, _ := json.Marshal(rule.NotificationSettings[1:]) l.Error("Detected multiple notification settings, which is not supported. 
Only the first will be applied", "ignored_settings", string(ignored)) } - return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels()) + return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels(features)) } return extraLabels } diff --git a/pkg/services/ngalert/state/state_test.go b/pkg/services/ngalert/state/state_test.go index 8daf82f9a66..acffa24e2cc 100644 --- a/pkg/services/ngalert/state/state_test.go +++ b/pkg/services/ngalert/state/state_test.go @@ -779,7 +779,7 @@ func TestGetRuleExtraLabels(t *testing.T) { models.RuleUIDLabel: rule.UID, ngmodels.AutogeneratedRouteLabel: "true", ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver, - ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(), + ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(), }, }, "ignore_multiple_notifications": { @@ -794,14 +794,14 @@ func TestGetRuleExtraLabels(t *testing.T) { models.RuleUIDLabel: rule.UID, ngmodels.AutogeneratedRouteLabel: "true", ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver, - ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(), + ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(), }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder) + result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder, nil) require.Equal(t, tc.expected, result) }) } diff --git a/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go b/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go index feec2e53db0..eac01f6c636 100644 --- a/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go +++ b/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go @@ -41,8 +41,10 @@ func TestGet(t *testing.T) { cfg := setting.NewCfg() ds := &fakeDatasources.FakeDataSourceService{} db := &dbtest.FakeDB{ExpectedError: pluginsettings.ErrPluginSettingNotFound} + store, err := pluginstore.NewPluginStoreForTest(preg, &pluginFakes.FakeLoader{}, &pluginFakes.FakeSourceRegistry{}) + require.NoError(t, err) pcp := plugincontext.ProvideService(cfg, localcache.ProvideService(), - pluginstore.New(preg, &pluginFakes.FakeLoader{}), &fakeDatasources.FakeCacheService{}, + store, &fakeDatasources.FakeCacheService{}, ds, pluginSettings.ProvideService(db, secretstest.NewFakeSecretsService()), pluginconfig.NewFakePluginRequestConfigProvider(), ) identity := &user.SignedInUser{OrgID: int64(1), Login: "admin"} diff --git a/pkg/services/pluginsintegration/plugininstaller/service.go b/pkg/services/pluginsintegration/plugininstaller/service.go index 543d354c425..7232703820c 100644 --- a/pkg/services/pluginsintegration/plugininstaller/service.go +++ b/pkg/services/pluginsintegration/plugininstaller/service.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/grafana/dskit/services" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/plugins/repo" @@ -18,6 +19,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +const ServiceName = "plugin.backgroundinstaller" + var ( installRequestCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "plugins", @@ -36,6 +39,7 @@ var ( ) type Service struct { + services.NamedService cfg *setting.Cfg log log.Logger pluginInstaller plugins.Installer @@ -43,6 +47,7 @@ type Service struct { pluginRepo repo.Service features 
featuremgmt.FeatureToggles updateChecker pluginchecker.PluginUpdateChecker + installComplete chan struct{} // closed when all plugins are installed (used for testing) } func ProvideService( @@ -60,21 +65,18 @@ func ProvideService( }) s := &Service{ - log: log.New("plugin.backgroundinstaller"), + log: log.New(ServiceName), cfg: cfg, pluginInstaller: pluginInstaller, pluginStore: pluginStore, pluginRepo: pluginRepo, features: features, updateChecker: updateChecker, + installComplete: make(chan struct{}), } - if len(cfg.PreinstallPluginsSync) > 0 { - // Block initialization process until plugins are installed - err := s.installPluginsWithTimeout(cfg.PreinstallPluginsSync) - if err != nil { - return nil, err - } - } + + s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName) + return s, nil } @@ -83,24 +85,6 @@ func (s *Service) IsDisabled() bool { return len(s.cfg.PreinstallPluginsAsync) == 0 } -func (s *Service) installPluginsWithTimeout(pluginsToInstall []setting.InstallPlugin) error { - // Installation process does not timeout by default nor reuses the context - // passed to the request so we need to handle the timeout here. - // We could make this timeout configurable in the future. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - done := make(chan struct{ err error }) - go func() { - done <- struct{ err error }{err: s.installPlugins(ctx, pluginsToInstall, true)} - }() - select { - case <-ctx.Done(): - return fmt.Errorf("failed to install plugins: %w", ctx.Err()) - case d := <-done: - return d.err - } -} - func (s *Service) shouldUpdate(ctx context.Context, pluginID, currentVersion string, pluginURL string) bool { // If the plugin is installed from a URL, we cannot check for updates as we do not have the version information // from the repository. Therefore, we assume that the plugin should be updated if the URL is provided. 
@@ -166,11 +150,34 @@ func (s *Service) installPlugins(ctx context.Context, pluginsToInstall []setting return nil } -func (s *Service) Run(ctx context.Context) error { - err := s.installPlugins(ctx, s.cfg.PreinstallPluginsAsync, false) - if err != nil { - // Unexpected error, asynchronous installation should not return errors - s.log.Error("Failed to install plugins", "error", err) +func (s *Service) starting(ctx context.Context) error { + if len(s.cfg.PreinstallPluginsSync) > 0 { + s.log.Info("Installing plugins", "plugins", s.cfg.PreinstallPluginsSync) + if err := s.installPlugins(ctx, s.cfg.PreinstallPluginsSync, true); err != nil { + s.log.Error("Failed to install plugins", "error", err) + return err + } } + s.log.Info("Plugins installed", "plugins", s.cfg.PreinstallPluginsSync) return nil } + +func (s *Service) running(ctx context.Context) error { + if len(s.cfg.PreinstallPluginsAsync) > 0 { + s.log.Info("Installing plugins", "plugins", s.cfg.PreinstallPluginsAsync) + if err := s.installPlugins(ctx, s.cfg.PreinstallPluginsAsync, false); err != nil { + s.log.Error("Failed to install plugins", "error", err) + return err + } + } + close(s.installComplete) + <-ctx.Done() + return nil +} + +func (s *Service) Run(ctx context.Context) error { + if err := s.StartAsync(ctx); err != nil { + return err + } + return s.AwaitTerminated(ctx) +} diff --git a/pkg/services/pluginsintegration/plugininstaller/service_test.go b/pkg/services/pluginsintegration/plugininstaller/service_test.go index 1d6c3059eac..7a846933db2 100644 --- a/pkg/services/pluginsintegration/plugininstaller/service_test.go +++ b/pkg/services/pluginsintegration/plugininstaller/service_test.go @@ -26,7 +26,7 @@ func TestService_IsDisabled(t *testing.T) { &setting.Cfg{ PreinstallPluginsAsync: []setting.InstallPlugin{{ID: "myplugin"}}, }, - pluginstore.New(registry.NewInMemory(), &fakes.FakeLoader{}), + pluginstore.New(registry.NewInMemory(), &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}), &fakes.FakePluginInstaller{}, prometheus.NewRegistry(), &fakes.FakePluginRepo{}, @@ -160,12 +160,14 @@ func TestService_Run(t *testing.T) { } installed := 0 installedFromURL := 0 + store, err := pluginstore.NewPluginStoreForTest(preg, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) s, err := ProvideService( &setting.Cfg{ PreinstallPluginsAsync: tt.pluginsToInstall, PreinstallPluginsSync: tt.pluginsToInstallSync, }, - pluginstore.New(preg, &fakes.FakeLoader{}), + store, &fakes.FakePluginInstaller{ AddFunc: func(ctx context.Context, pluginID string, version string, opts plugins.AddOpts) error { for _, plugin := range tt.pluginsToFail { @@ -203,13 +205,26 @@ func TestService_Run(t *testing.T) { &pluginchecker.FakePluginPreinstall{}, ), ) + require.NoError(t, err) + + t.Cleanup(func() { + s.StopAsync() + err := s.AwaitTerminated(context.Background()) + if tt.shouldThrowError { + require.ErrorContains(t, err, "Failed to install plugin") + return + } + require.NoError(t, err) + }) + + err = s.StartAsync(context.Background()) + require.NoError(t, err) + err = s.AwaitRunning(context.Background()) if tt.shouldThrowError { require.ErrorContains(t, err, "Failed to install plugin") return } require.NoError(t, err) - err = s.Run(context.Background()) - require.NoError(t, err) if tt.shouldInstall { expectedInstalled := 0 @@ -232,6 +247,7 @@ func TestService_Run(t *testing.T) { expectedInstalled++ } } + <-s.installComplete require.Equal(t, expectedInstalled, installed) require.Equal(t, expectedInstalledFromURL, installedFromURL) } 
diff --git a/pkg/services/pluginsintegration/pluginstore/store.go b/pkg/services/pluginsintegration/pluginstore/store.go index 20ef15379cf..f486218040e 100644 --- a/pkg/services/pluginsintegration/pluginstore/store.go +++ b/pkg/services/pluginsintegration/pluginstore/store.go @@ -3,18 +3,21 @@ package pluginstore import ( "context" "sort" - "sync" "time" + "github.com/grafana/dskit/services" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/plugins/manager/loader" "github.com/grafana/grafana/pkg/plugins/manager/registry" "github.com/grafana/grafana/pkg/plugins/manager/sources" + "golang.org/x/sync/errgroup" ) var _ Store = (*Service)(nil) +const ServiceName = "plugins.store" + // Store is the publicly accessible storage for plugins. type Store interface { // Plugin finds a plugin by its ID. @@ -25,47 +28,81 @@ type Store interface { } type Service struct { + services.NamedService + pluginRegistry registry.Service pluginLoader loader.Service + pluginSources sources.Registry } func ProvideService(pluginRegistry registry.Service, pluginSources sources.Registry, - pluginLoader loader.Service) (*Service, error) { - ctx := context.Background() + pluginLoader loader.Service) *Service { + return New(pluginRegistry, pluginLoader, pluginSources) +} + +func (s *Service) Run(ctx context.Context) error { + if err := s.StartAsync(ctx); err != nil { + return err + } + stopCtx := context.Background() + return s.AwaitTerminated(stopCtx) +} + +func NewPluginStoreForTest(pluginRegistry registry.Service, pluginLoader loader.Service, pluginSources sources.Registry) (*Service, error) { + s := New(pluginRegistry, pluginLoader, pluginSources) + if err := s.StartAsync(context.Background()); err != nil { + return nil, err + } + if err := s.AwaitRunning(context.Background()); err != nil { + return nil, err + } + return s, nil +} + +func New(pluginRegistry registry.Service, pluginLoader loader.Service, pluginSources sources.Registry) *Service { + s := &Service{ + pluginRegistry: pluginRegistry, + pluginLoader: pluginLoader, + pluginSources: pluginSources, + } + s.NamedService = services.NewBasicService(s.starting, s.running, s.stopping).WithName(ServiceName) + return s +} + +func (s *Service) starting(ctx context.Context) error { start := time.Now() totalPlugins := 0 - logger := log.New("plugin.store") + logger := log.New(ServiceName) logger.Info("Loading plugins...") - for _, ps := range pluginSources.List(ctx) { - loadedPlugins, err := pluginLoader.Load(ctx, ps) + for _, ps := range s.pluginSources.List(ctx) { + loadedPlugins, err := s.pluginLoader.Load(ctx, ps) if err != nil { logger.Error("Loading plugin source failed", "source", ps.PluginClass(ctx), "error", err) - return nil, err + return err } - totalPlugins += len(loadedPlugins) } logger.Info("Plugins loaded", "count", totalPlugins, "duration", time.Since(start)) - return New(pluginRegistry, pluginLoader), nil + return nil } -func (s *Service) Run(ctx context.Context) error { +func (s *Service) running(ctx context.Context) error { <-ctx.Done() - s.shutdown(ctx) - return ctx.Err() + return nil } -func New(pluginRegistry registry.Service, pluginLoader loader.Service) *Service { - return &Service{ - pluginRegistry: pluginRegistry, - pluginLoader: pluginLoader, - } +func (s *Service) stopping(failureReason error) error { + return s.shutdown(context.Background()) } func (s *Service) Plugin(ctx context.Context, pluginID string) (Plugin, bool) { + if err := s.AwaitRunning(ctx); err != nil { + 
log.New(ServiceName).FromContext(ctx).Error("Failed to get plugin", "error", err) + return Plugin{}, false + } p, exists := s.plugin(ctx, pluginID) if !exists { return Plugin{}, false @@ -75,6 +112,10 @@ func (s *Service) Plugin(ctx context.Context, pluginID string) (Plugin, bool) { } func (s *Service) Plugins(ctx context.Context, pluginTypes ...plugins.Type) []Plugin { + if err := s.AwaitRunning(ctx); err != nil { + log.New(ServiceName).FromContext(ctx).Error("Failed to get plugins", "error", err) + return []Plugin{} + } // if no types passed, assume all if len(pluginTypes) == 0 { pluginTypes = plugins.PluginTypes @@ -125,6 +166,10 @@ func (s *Service) availablePlugins(ctx context.Context) []*plugins.Plugin { } func (s *Service) Routes(ctx context.Context) []*plugins.StaticRoute { + if err := s.AwaitRunning(ctx); err != nil { + log.New(ServiceName).FromContext(ctx).Error("Failed to get routes", "error", err) + return []*plugins.StaticRoute{} + } staticRoutes := make([]*plugins.StaticRoute, 0) for _, p := range s.availablePlugins(ctx) { @@ -135,18 +180,20 @@ func (s *Service) Routes(ctx context.Context) []*plugins.StaticRoute { return staticRoutes } -func (s *Service) shutdown(ctx context.Context) { - var wg sync.WaitGroup - for _, plugin := range s.pluginRegistry.Plugins(ctx) { - wg.Add(1) - go func(ctx context.Context, p *plugins.Plugin) { - defer wg.Done() - p.Logger().Debug("Stopping plugin") - if _, err := s.pluginLoader.Unload(ctx, p); err != nil { - p.Logger().Error("Failed to stop plugin", "error", err) +func (s *Service) shutdown(ctx context.Context) error { + var errgroup errgroup.Group + plugins := s.pluginRegistry.Plugins(ctx) + for _, p := range plugins { + plugin := p // capture loop variable + errgroup.Go(func() error { + plugin.Logger().Debug("Stopping plugin") + if _, err := s.pluginLoader.Unload(ctx, plugin); err != nil { + plugin.Logger().Error("Failed to stop plugin", "error", err) + return err } - p.Logger().Debug("Plugin stopped") - }(ctx, plugin) + plugin.Logger().Debug("Plugin stopped") + return nil + }) } - wg.Wait() + return errgroup.Wait() } diff --git a/pkg/services/pluginsintegration/pluginstore/store_test.go b/pkg/services/pluginsintegration/pluginstore/store_test.go index b0c0b408cf9..e195fe5e932 100644 --- a/pkg/services/pluginsintegration/pluginstore/store_test.go +++ b/pkg/services/pluginsintegration/pluginstore/store_test.go @@ -2,7 +2,7 @@ package pluginstore import ( "context" - "sync" + "errors" "testing" "github.com/stretchr/testify/require" @@ -43,7 +43,11 @@ func TestStore_ProvideService(t *testing.T) { } }} - _, err := ProvideService(fakes.NewFakePluginRegistry(), srcs, l) + service := ProvideService(fakes.NewFakePluginRegistry(), srcs, l) + ctx := context.Background() + err := service.StartAsync(ctx) + require.NoError(t, err) + err = service.AwaitRunning(ctx) require.NoError(t, err) require.Equal(t, []plugins.Class{"1", "2", "3"}, loadedSrcs) }) @@ -55,12 +59,13 @@ func TestStore_Plugin(t *testing.T) { p1.RegisterClient(&DecommissionedPlugin{}) p2 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-panel"}} - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) p, exists := ps.Plugin(context.Background(), p1.ID) require.False(t, exists) @@ -81,7 +86,7 @@ func TestStore_Plugins(t *testing.T) { p5 := &plugins.Plugin{JSONData: 
plugins.JSONData{ID: "e-test-panel", Type: plugins.TypePanel}} p5.RegisterClient(&DecommissionedPlugin{}) - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, @@ -89,7 +94,8 @@ func TestStore_Plugins(t *testing.T) { p4.ID: p4, p5.ID: p5, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) ToGrafanaDTO(p1) pss := ps.Plugins(context.Background()) @@ -124,7 +130,7 @@ func TestStore_Routes(t *testing.T) { p6 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "f-test-app", Type: plugins.TypeApp}} p6.RegisterClient(&DecommissionedPlugin{}) - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, @@ -132,7 +138,8 @@ func TestStore_Routes(t *testing.T) { p5.ID: p5, p6.ID: p6, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) sr := func(p *plugins.Plugin) *plugins.StaticRoute { return &plugins.StaticRoute{PluginID: p.ID, Directory: p.FS.Base()} @@ -144,39 +151,62 @@ func TestStore_Routes(t *testing.T) { } func TestProcessManager_shutdown(t *testing.T) { - p := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-datasource", Type: plugins.TypeDataSource}} // Backend: true - backend := &fakes.FakeBackendPlugin{} - p.RegisterClient(backend) - p.SetLogger(log.NewTestLogger()) + t.Run("When context is cancelled the plugin is stopped", func(t *testing.T) { + p := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-datasource", Type: plugins.TypeDataSource}} // Backend: true + backend := &fakes.FakeBackendPlugin{} + p.RegisterClient(backend) + p.SetLogger(log.NewTestLogger()) - unloaded := false - ps := New(&fakes.FakePluginRegistry{ - Store: map[string]*plugins.Plugin{ - p.ID: p, - }, - }, &fakes.FakeLoader{ - UnloadFunc: func(_ context.Context, plugin *plugins.Plugin) (*plugins.Plugin, error) { - require.Equal(t, p, plugin) - unloaded = true - return nil, nil - }, + unloaded := false + ps := New(&fakes.FakePluginRegistry{ + Store: map[string]*plugins.Plugin{ + p.ID: p, + }, + }, &fakes.FakeLoader{ + UnloadFunc: func(_ context.Context, plugin *plugins.Plugin) (*plugins.Plugin, error) { + require.Equal(t, p, plugin) + unloaded = true + return nil, nil + }, + }, &fakes.FakeSourceRegistry{}) + + ctx, cancel := context.WithCancel(context.Background()) + + err := ps.StartAsync(ctx) + require.NoError(t, err) + err = ps.AwaitRunning(ctx) + require.NoError(t, err) + + // Cancel context to trigger shutdown + cancel() + + // Wait for service to be fully terminated + err = ps.AwaitTerminated(context.Background()) + require.NoError(t, err) + require.True(t, unloaded) }) - pCtx := context.Background() - cCtx, cancel := context.WithCancel(pCtx) - var wgRun sync.WaitGroup - wgRun.Add(1) - var runErr error - go func() { - runErr = ps.Run(cCtx) - wgRun.Done() - }() + t.Run("When shutdown fails, stopping method returns error", func(t *testing.T) { + p := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-datasource", Type: plugins.TypeDataSource}} + backend := &fakes.FakeBackendPlugin{} + p.RegisterClient(backend) + p.SetLogger(log.NewTestLogger()) - t.Run("When context is cancelled the plugin is stopped", func(t *testing.T) { - cancel() - wgRun.Wait() - require.ErrorIs(t, runErr, context.Canceled) - require.True(t, unloaded) + expectedErr := errors.New("unload failed") + ps, err := 
NewPluginStoreForTest(&fakes.FakePluginRegistry{ + Store: map[string]*plugins.Plugin{ + p.ID: p, + }, + }, &fakes.FakeLoader{ + UnloadFunc: func(_ context.Context, plugin *plugins.Plugin) (*plugins.Plugin, error) { + return nil, expectedErr + }, + }, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + + err = ps.stopping(nil) + require.Error(t, err) + require.ErrorIs(t, err, expectedErr) }) } @@ -186,12 +216,13 @@ func TestStore_availablePlugins(t *testing.T) { p1.RegisterClient(&DecommissionedPlugin{}) p2 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-app"}} - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) aps := ps.availablePlugins(context.Background()) require.Len(t, aps, 1) diff --git a/pkg/services/pluginsintegration/test_helper.go b/pkg/services/pluginsintegration/test_helper.go index 8fec1925eb3..6689430c43b 100644 --- a/pkg/services/pluginsintegration/test_helper.go +++ b/pkg/services/pluginsintegration/test_helper.go @@ -67,7 +67,7 @@ func CreateIntegrationTestCtx(t *testing.T, cfg *setting.Cfg, coreRegistry *core Terminator: term, }) - ps, err := pluginstore.ProvideService(reg, sources.ProvideService(cfg, pCfg), l) + ps, err := pluginstore.NewPluginStoreForTest(reg, l, sources.ProvideService(cfg, pCfg)) require.NoError(t, err) return &IntegrationTestCtx{ diff --git a/pkg/services/preference/model.go b/pkg/services/preference/model.go index edcde2f9fb2..26fdc11ac02 100644 --- a/pkg/services/preference/model.go +++ b/pkg/services/preference/model.go @@ -74,6 +74,12 @@ type SavePreferenceCommand struct { Navbar *NavbarPreference `json:"navbar,omitempty"` } +// One (and only one) of the values must be non-zero +type DeleteCommand struct { + OrgID int64 + UserID int64 + TeamID int64 +} type PatchPreferenceCommand struct { UserID int64 OrgID int64 diff --git a/pkg/services/preference/pref.go b/pkg/services/preference/pref.go index 1da5345d16a..6c6697d442a 100644 --- a/pkg/services/preference/pref.go +++ b/pkg/services/preference/pref.go @@ -10,5 +10,5 @@ type Service interface { Save(context.Context, *SavePreferenceCommand) error Patch(context.Context, *PatchPreferenceCommand) error GetDefaults() *Preference - DeleteByUser(context.Context, int64) error + Delete(context.Context, *DeleteCommand) error } diff --git a/pkg/services/preference/prefimpl/inmemory_test.go b/pkg/services/preference/prefimpl/inmemory_test.go index 63625cdb397..d24d230e0b7 100644 --- a/pkg/services/preference/prefimpl/inmemory_test.go +++ b/pkg/services/preference/prefimpl/inmemory_test.go @@ -121,6 +121,6 @@ func (s *inmemStore) Update(ctx context.Context, preference *pref.Preference) er return nil } -func (s *inmemStore) DeleteByUser(ctx context.Context, userID int64) error { +func (s *inmemStore) Delete(context.Context, *pref.DeleteCommand) error { panic("not yet implemented") } diff --git a/pkg/services/preference/prefimpl/pref.go b/pkg/services/preference/prefimpl/pref.go index 392f0617048..429feedfa79 100644 --- a/pkg/services/preference/prefimpl/pref.go +++ b/pkg/services/preference/prefimpl/pref.go @@ -272,8 +272,8 @@ func (s *Service) GetDefaults() *pref.Preference { } } -func (s *Service) DeleteByUser(ctx context.Context, userID int64) error { - return s.store.DeleteByUser(ctx, userID) +func (s *Service) Delete(ctx context.Context, cmd *pref.DeleteCommand) error { + return 
s.store.Delete(ctx, cmd) } func parseCookiePreferences(prefs []pref.CookieType) (map[string]struct{}, error) { diff --git a/pkg/services/preference/prefimpl/store.go b/pkg/services/preference/prefimpl/store.go index 7c8575a8a07..192f92cfb04 100644 --- a/pkg/services/preference/prefimpl/store.go +++ b/pkg/services/preference/prefimpl/store.go @@ -12,5 +12,5 @@ type store interface { // Insert adds a new preference and returns its sequential ID Insert(context.Context, *pref.Preference) (int64, error) Update(context.Context, *pref.Preference) error - DeleteByUser(context.Context, int64) error + Delete(context.Context, *pref.DeleteCommand) error } diff --git a/pkg/services/preference/prefimpl/store_test.go b/pkg/services/preference/prefimpl/store_test.go index 40015e80425..4bb971029cc 100644 --- a/pkg/services/preference/prefimpl/store_test.go +++ b/pkg/services/preference/prefimpl/store_test.go @@ -185,9 +185,10 @@ func testIntegrationPreferencesDataAccess(t *testing.T, fn getStore) { require.NoError(t, err) }) t.Run("delete preference by user", func(t *testing.T) { - err := prefStore.DeleteByUser(context.Background(), user.SignedInUser{}.UserID) + userId := int64(1) + err := prefStore.Delete(context.Background(), &pref.DeleteCommand{UserID: userId}) require.NoError(t, err) - query := &pref.Preference{OrgID: 0, UserID: user.SignedInUser{}.UserID, TeamID: 0} + query := &pref.Preference{OrgID: 0, UserID: userId, TeamID: 0} _, err = prefStore.Get(context.Background(), query) require.EqualError(t, err, pref.ErrPrefNotFound.Error()) }) diff --git a/pkg/services/preference/prefimpl/xorm_store.go b/pkg/services/preference/prefimpl/xorm_store.go index 286abd57885..1b5b2535829 100644 --- a/pkg/services/preference/prefimpl/xorm_store.go +++ b/pkg/services/preference/prefimpl/xorm_store.go @@ -2,6 +2,7 @@ package prefimpl import ( "context" + "fmt" "strings" "github.com/grafana/grafana/pkg/infra/db" @@ -80,10 +81,27 @@ func (s *sqlStore) Insert(ctx context.Context, cmd *pref.Preference) (int64, err return ID, err } -func (s *sqlStore) DeleteByUser(ctx context.Context, userID int64) error { - return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { - var rawSQL = "DELETE FROM preferences WHERE user_id = ?" - _, err := dbSession.Exec(rawSQL, userID) - return err - }) +func (s *sqlStore) Delete(ctx context.Context, cmd *pref.DeleteCommand) error { + if cmd.UserID > 0 { + return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { + var rawSQL = "DELETE FROM preferences WHERE user_id = ?" + _, err := dbSession.Exec(rawSQL, cmd.UserID) + return err + }) + } + if cmd.TeamID > 0 { + return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { + var rawSQL = "DELETE FROM preferences WHERE team_id = ?" + _, err := dbSession.Exec(rawSQL, cmd.TeamID) + return err + }) + } + if cmd.OrgID > 0 { + return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { + var rawSQL = "DELETE FROM preferences WHERE org_id = ? 
AND user_id=0 AND team_id=0" + _, err := dbSession.Exec(rawSQL, cmd.OrgID) + return err + }) + } + return fmt.Errorf("expecting one of team, org, user to be non-zero") } diff --git a/pkg/services/preference/preftest/fake.go b/pkg/services/preference/preftest/fake.go index 8c6d7e07708..c8cf6b0be19 100644 --- a/pkg/services/preference/preftest/fake.go +++ b/pkg/services/preference/preftest/fake.go @@ -35,6 +35,6 @@ func (f *FakePreferenceService) Patch(ctx context.Context, cmd *pref.PatchPrefer return f.ExpectedError } -func (f *FakePreferenceService) DeleteByUser(context.Context, int64) error { +func (f *FakePreferenceService) Delete(context.Context, *pref.DeleteCommand) error { return f.ExpectedError } diff --git a/pkg/services/provisioning/provisioning.go b/pkg/services/provisioning/provisioning.go index b40b6136a46..e7e8c3c5fa7 100644 --- a/pkg/services/provisioning/provisioning.go +++ b/pkg/services/provisioning/provisioning.go @@ -7,6 +7,7 @@ import ( "path/filepath" "sync" + "github.com/grafana/dskit/services" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/tracing" @@ -39,6 +40,8 @@ import ( "github.com/grafana/grafana/pkg/storage/legacysql/dualwrite" ) +const ServiceName = "provisioning" + func ProvideService( ac accesscontrol.AccessControl, cfg *setting.Cfg, @@ -91,6 +94,8 @@ func ProvideService( dual: dual, } + s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName) + if err := s.setDashboardProvisioner(); err != nil { return nil, err } @@ -98,6 +103,67 @@ func ProvideService( return s, nil } +func (ps *ProvisioningServiceImpl) starting(ctx context.Context) error { + if err := ps.ProvisionDatasources(ctx); err != nil { + ps.log.Error("Failed to provision data sources", "error", err) + return err + } + + if err := ps.ProvisionPlugins(ctx); err != nil { + ps.log.Error("Failed to provision plugins", "error", err) + return err + } + + if err := ps.ProvisionAlerting(ctx); err != nil { + ps.log.Error("Failed to provision alerting", "error", err) + return err + } + + // Migrating prom types relies on data source provisioning to already be completed + // If we can make services depend on other services completing first, + // then we should remove this from provisioning + if err := ps.migratePrometheusType(ctx); err != nil { + ps.log.Error("Failed to migrate Prometheus type", "error", err) + return err + } + + if err := ps.ProvisionDashboards(ctx); err != nil { + ps.log.Error("Failed to provision dashboard", "error", err) + // Consider the allow list of errors for which running the provisioning service should not + // fail. For now this includes only dashboards.ErrGetOrCreateFolder. + if !errors.Is(err, dashboards.ErrGetOrCreateFolder) { + return err + } + } + if ps.dashboardProvisioner.HasDashboardSources() { + ps.searchService.TriggerReIndex() + } + return nil +} + +func (ps *ProvisioningServiceImpl) running(ctx context.Context) error { + for { + // Wait for unlock. This is tied to new dashboardProvisioner to be instantiated before we start polling. + ps.mutex.Lock() + // Using background here because otherwise if root context was canceled the select later on would + // non-deterministically take one of the route possibly going into one polling loop before exiting. 
+ pollingContext, cancelFun := context.WithCancel(context.Background()) + ps.pollingCtxCancel = cancelFun + ps.dashboardProvisioner.PollChanges(pollingContext) + ps.mutex.Unlock() + + select { + case <-pollingContext.Done(): + // Polling was canceled. + continue + case <-ctx.Done(): + // Root server context was cancelled so cancel polling and leave. + ps.cancelPolling() + return nil + } + } +} + func (ps *ProvisioningServiceImpl) setDashboardProvisioner() error { dashboardPath := filepath.Join(ps.Cfg.ProvisioningPath, "dashboards") dashProvisioner, err := ps.newDashboardProvisioner(context.Background(), dashboardPath, ps.dashboardProvisioningService, ps.orgService, ps.dashboardService, ps.folderService, ps.dual) @@ -137,6 +203,8 @@ func newProvisioningServiceImpl( migratePrometheusType: migratePrometheusType, } + s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName) + if err := s.setDashboardProvisioner(); err != nil { return nil, err } @@ -145,6 +213,7 @@ func newProvisioningServiceImpl( } type ProvisioningServiceImpl struct { + services.NamedService Cfg *setting.Cfg SQLStore db.DB orgService org.Service @@ -173,7 +242,6 @@ type ProvisioningServiceImpl struct { resourcePermissions accesscontrol.ReceiverPermissionsService tracer tracing.Tracer dual dualwrite.Service - onceInitProvisioners sync.Once migratePrometheusType func(context.Context) error } @@ -185,78 +253,11 @@ func (ps *ProvisioningServiceImpl) RunInitProvisioners(ctx context.Context) erro } func (ps *ProvisioningServiceImpl) Run(ctx context.Context) error { - var err error - - // Run Datasources, Plugins and Alerting Provisioning only once. - // It can't be initialized at RunInitProvisioners because it - // depends on the /apis endpoints to be already running and listeningq - ps.onceInitProvisioners.Do(func() { - err = ps.ProvisionDatasources(ctx) - if err != nil { - ps.log.Error("Failed to provision data sources", "error", err) - return - } - - err = ps.ProvisionPlugins(ctx) - if err != nil { - ps.log.Error("Failed to provision plugins", "error", err) - return - } - - err = ps.ProvisionAlerting(ctx) - if err != nil { - ps.log.Error("Failed to provision alerting", "error", err) - return - } - - // Migrating prom types relies on data source provisioning to already be completed - // If we can make services depend on other services completing first, - // then we should remove this from provisioning - err = ps.migratePrometheusType(ctx) - if err != nil { - ps.log.Error("Failed to migrate Prometheus type", "error", err) - return - } - }) - - if err != nil { - // error already logged + if err := ps.StartAsync(ctx); err != nil { return err } - - err = ps.ProvisionDashboards(ctx) - if err != nil { - ps.log.Error("Failed to provision dashboard", "error", err) - // Consider the allow list of errors for which running the provisioning service should not - // fail. For now this includes only dashboards.ErrGetOrCreateFolder. - if !errors.Is(err, dashboards.ErrGetOrCreateFolder) { - return err - } - } - if ps.dashboardProvisioner.HasDashboardSources() { - ps.searchService.TriggerReIndex() - } - - for { - // Wait for unlock. This is tied to new dashboardProvisioner to be instantiated before we start polling. - ps.mutex.Lock() - // Using background here because otherwise if root context was canceled the select later on would - // non-deterministically take one of the route possibly going into one polling loop before exiting. 
- pollingContext, cancelFun := context.WithCancel(context.Background()) - ps.pollingCtxCancel = cancelFun - ps.dashboardProvisioner.PollChanges(pollingContext) - ps.mutex.Unlock() - - select { - case <-pollingContext.Done(): - // Polling was canceled. - continue - case <-ctx.Done(): - // Root server context was cancelled so cancel polling and leave. - ps.cancelPolling() - return ctx.Err() - } - } + stopCtx := context.Background() + return ps.AwaitTerminated(stopCtx) } func (ps *ProvisioningServiceImpl) ProvisionDatasources(ctx context.Context) error { diff --git a/pkg/services/provisioning/provisioning_test.go b/pkg/services/provisioning/provisioning_test.go index ee4abc5029b..8b513ba321e 100644 --- a/pkg/services/provisioning/provisioning_test.go +++ b/pkg/services/provisioning/provisioning_test.go @@ -48,7 +48,7 @@ func TestProvisioningServiceImpl(t *testing.T) { serviceTest.waitForStop() assert.False(t, serviceTest.serviceRunning, "Service should not be running") - assert.Equal(t, context.Canceled, serviceTest.serviceError, "Service should have returned canceled error") + assert.NoError(t, serviceTest.serviceError, "Service should not have returned an error") }) t.Run("Failed reloading does not stop polling with old provisioned", func(t *testing.T) { @@ -91,7 +91,7 @@ func TestProvisioningServiceImpl(t *testing.T) { serviceTest.cancel() serviceTest.waitForStop() - assert.Equal(t, context.Canceled, serviceTest.serviceError) + assert.NoError(t, serviceTest.serviceError, "Service should not have returned an error") }) t.Run("Should return run error when dashboard provisioning fails for non-allow-listed error", func(t *testing.T) { diff --git a/pkg/services/store/kind/dashboard/dashboard.go b/pkg/services/store/kind/dashboard/dashboard.go index 3229acbe80d..d6000689c1a 100644 --- a/pkg/services/store/kind/dashboard/dashboard.go +++ b/pkg/services/store/kind/dashboard/dashboard.go @@ -334,6 +334,9 @@ func filterOutSpecialDatasources(dash *DashboardSummaryInfo) { case "-- Dashboard --": // The `Dashboard` datasource refers to the results of the query used in another panel continue + case "grafana": + // this is the uid for the -- Grafana -- datasource + continue default: dsRefs = append(dsRefs, ds) } diff --git a/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json b/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json index 136e20a8571..a6502144412 100644 --- a/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json +++ b/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json @@ -2,24 +2,12 @@ "id": 250, "title": "fast streaming", "tags": null, - "datasource": [ - { - "uid": "grafana", - "type": "datasource" - } - ], "panels": [ { "id": 3, "title": "Panel Title", "type": "timeseries", - "pluginVersion": "7.5.0-pre", - "datasource": [ - { - "uid": "grafana", - "type": "datasource" - } - ] + "pluginVersion": "7.5.0-pre" } ], "schemaVersion": 27, diff --git a/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json b/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json index 1f2fabff36f..37a7b893804 100644 --- a/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json +++ b/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json @@ -3,10 +3,6 @@ "title": "special ds", "tags": null, "datasource": [ - { - "uid": "grafana", - "type": "datasource" - }, { "uid": "dgd92lq7k", "type": "frser-sqlite-datasource" @@ 
-22,10 +18,6 @@ "title": "mixed ds with grafana ds", "type": "timeseries", "datasource": [ - { - "uid": "grafana", - "type": "datasource" - }, { "uid": "dgd92lq7k", "type": "frser-sqlite-datasource" @@ -45,13 +37,7 @@ { "id": 6, "title": "grafana ds", - "type": "timeseries", - "datasource": [ - { - "uid": "grafana", - "type": "datasource" - } - ] + "type": "timeseries" }, { "id": 2, diff --git a/pkg/services/team/teamapi/team.go b/pkg/services/team/teamapi/team.go index 9df49d0d66a..69436edd8cc 100644 --- a/pkg/services/team/teamapi/team.go +++ b/pkg/services/team/teamapi/team.go @@ -431,7 +431,8 @@ func (tapi *TeamAPI) validateTeam(c *contextmodel.ReqContext, teamID int64, prov return response.Error(http.StatusInternalServerError, "Failed to get Team", err) } - if teamDTO.IsProvisioned { + isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false) + if isGroupSyncEnabled && teamDTO.IsProvisioned { return response.Error(http.StatusBadRequest, provisionedMessage, err) } diff --git a/pkg/services/team/teamapi/team_members.go b/pkg/services/team/teamapi/team_members.go index 1303f97e028..e9f4d0ac91d 100644 --- a/pkg/services/team/teamapi/team_members.go +++ b/pkg/services/team/teamapi/team_members.go @@ -299,7 +299,8 @@ func (tapi *TeamAPI) removeTeamMember(c *contextmodel.ReqContext) response.Respo return response.Error(http.StatusInternalServerError, "Failed to get Team", err) } - if existingTeam.IsProvisioned { + isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false) + if isGroupSyncEnabled && existingTeam.IsProvisioned { return response.Error(http.StatusBadRequest, "Team memberships cannot be updated for provisioned teams", err) } diff --git a/pkg/services/team/teamapi/team_members_test.go b/pkg/services/team/teamapi/team_members_test.go index f14c01d1c73..65f8f2693ee 100644 --- a/pkg/services/team/teamapi/team_members_test.go +++ b/pkg/services/team/teamapi/team_members_test.go @@ -169,13 +169,15 @@ func TestUpdateTeamMembersAPIEndpoint(t *testing.T) { }) } -func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) { +func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsEnabled(t *testing.T) { server := SetupAPITestServer(t, &teamtest.FakeService{ ExpectedIsMember: true, ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true}, + }, func(tapi *TeamAPI) { + tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true") }) - t.Run("should not be able to update team member from a provisioned team", func(t *testing.T) { + t.Run("should not be able to update team member from a provisioned team if team sync is enabled", func(t *testing.T) { req := webtest.RequestWithSignedInUser( server.NewRequest(http.MethodPut, "/api/teams/1/members/1", strings.NewReader("{\"permission\": 1}")), authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}), @@ -186,7 +188,7 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) { require.NoError(t, res.Body.Close()) }) - t.Run("should not be able to update team member from a provisioned team by team UID", func(t *testing.T) { + t.Run("should not be able to update team member from a provisioned team by team UID if team sync is enabled", func(t *testing.T) { req := webtest.RequestWithSignedInUser( server.NewRequest(http.MethodPut, "/api/teams/a00001/members/1", strings.NewReader("{\"permission\": 1}")), authedUserWithPermissions(1, 1, 
[]accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}), @@ -198,6 +200,27 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) { }) } +func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsDisabled(t *testing.T) { + t.Run("should be able to delete team member from a provisioned team when SCIM group sync is disabled", func(t *testing.T) { + server := SetupAPITestServer(t, nil, func(hs *TeamAPI) { + hs.teamService = &teamtest.FakeService{ + ExpectedIsMember: true, + ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true}, + } + hs.teamPermissionsService = &actest.FakePermissionsService{} + }) + + req := webtest.RequestWithSignedInUser( + server.NewRequest(http.MethodDelete, "/api/teams/1/members/1", nil), + authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}), + ) + res, err := server.SendJSON(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + require.NoError(t, res.Body.Close()) + }) +} + func TestDeleteTeamMembersAPIEndpoint(t *testing.T) { server := SetupAPITestServer(t, nil, func(hs *TeamAPI) { hs.teamService = &teamtest.FakeService{ @@ -236,6 +259,8 @@ func TestDeleteTeamMembersFromProvisionedTeam(t *testing.T) { ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true}, } hs.teamPermissionsService = &actest.FakePermissionsService{} + }, func(hs *TeamAPI) { + hs.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true") }) t.Run("should not be able to delete team member from a provisioned team", func(t *testing.T) { diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index e31ceae24aa..11071c2a9fc 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -138,6 +138,7 @@ type Cfg struct { ProvisioningDisableControllers bool ProvisioningAllowedTargets []string ProvisioningAllowImageRendering bool + ProvisioningMinSyncInterval time.Duration ProvisioningRepositoryTypes []string ProvisioningLokiURL string ProvisioningLokiUser string @@ -2125,6 +2126,7 @@ func (cfg *Cfg) readProvisioningSettings(iniFile *ini.File) error { cfg.ProvisioningAllowedTargets = []string{"instance", "folder"} } cfg.ProvisioningAllowImageRendering = iniFile.Section("provisioning").Key("allow_image_rendering").MustBool(true) + cfg.ProvisioningMinSyncInterval = iniFile.Section("provisioning").Key("min_sync_interval").MustDuration(10 * time.Second) // Read job history configuration cfg.ProvisioningLokiURL = valueAsString(iniFile.Section("provisioning"), "loki_url", "") diff --git a/pkg/storage/unified/apistore/store.go b/pkg/storage/unified/apistore/store.go index bf104949fa9..a6c37c00dab 100644 --- a/pkg/storage/unified/apistore/store.go +++ b/pkg/storage/unified/apistore/store.go @@ -293,13 +293,6 @@ func (s *Storage) Delete( if err := preconditions.Check(key, out); err != nil { return err } - - if preconditions.ResourceVersion != nil { - cmd.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64) - if err != nil { - return err - } - } if preconditions.UID != nil { cmd.Uid = string(*preconditions.UID) } @@ -319,6 +312,10 @@ func (s *Storage) Delete( return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_DELETED, key, out, out) } + cmd.ResourceVersion, err = meta.GetResourceVersionInt64() + if err != nil { + return resource.GetError(resource.AsErrorResult(err)) + } rsp, err := s.store.Delete(ctx, cmd) if err != nil { return 
resource.GetError(resource.AsErrorResult(err)) @@ -536,6 +533,18 @@ func (s *Storage) GuaranteedUpdate( if err != nil { return err } + // NOTE: by default, the RV will **not** be set in the preconditions (it is removed here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L187) + // instead, the RV check is done with the object from the request itself. + // + // the object from the request is retrieved in the tryUpdate function (we use the generic k8s store one). this function calls the UpdateObject function here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L653 + // and that will run a series of transformations: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L219 + // + // the specific transformations it runs depends on what type of update it is. + // for patch, the transformers are set here and use the patchBytes from the request: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go#L697 + // for put, it uses the object from the request here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go#L163 + // + // after those transformations, the RV will then be on the object so that the RV check can properly be done here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L662 + // it will be compared to the current object that we pass in below from storage. if preconditions != nil && preconditions.ResourceVersion != nil { req.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64) if err != nil { @@ -611,41 +620,45 @@ func (s *Storage) GuaranteedUpdate( } continue } - break - } - v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj) - if err != nil { - return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_MODIFIED, key, updatedObj, destination) - } - - // Only update (for real) if the bytes have changed - var rv uint64 - req.Value = v.raw.Bytes() - if !bytes.Equal(req.Value, existingBytes) { - updateResponse, err := s.store.Update(ctx, req) + v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj) if err != nil { - err = resource.GetError(resource.AsErrorResult(err)) - } else if updateResponse.Error != nil { - err = resource.GetError(updateResponse.Error) + return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_MODIFIED, key, updatedObj, destination) } - // Cleanup secure values - if err = v.finish(ctx, err, s.opts.SecureValues); err != nil { + // Only update (for real) if the bytes have changed + var rv uint64 + req.Value = v.raw.Bytes() + if !bytes.Equal(req.Value, existingBytes) { + req.ResourceVersion = readResponse.ResourceVersion + updateResponse, err := s.store.Update(ctx, req) + if err != nil { + err = resource.GetError(resource.AsErrorResult(err)) + } else if updateResponse.Error != nil { + if attempt < MaxUpdateAttempts && updateResponse.Error.Code == http.StatusConflict { + continue // try the read again + } + err = resource.GetError(updateResponse.Error) + } + + // Cleanup secure values + if err = v.finish(ctx, err, s.opts.SecureValues); err != nil { + return err + } + + rv = uint64(updateResponse.ResourceVersion) + } + + if _, err := s.convertToObject(req.Value, destination); err != nil { return err } - rv = 
uint64(updateResponse.ResourceVersion) - } - - if _, err := s.convertToObject(req.Value, destination); err != nil { - return err - } - - if rv > 0 { - if err := s.versioner.UpdateObject(destination, rv); err != nil { - return err + if rv > 0 { + if err := s.versioner.UpdateObject(destination, rv); err != nil { + return err + } } + return nil } return nil diff --git a/pkg/storage/unified/resource/access.go b/pkg/storage/unified/resource/access.go index 161184c7a9e..94cd5b4a801 100644 --- a/pkg/storage/unified/resource/access.go +++ b/pkg/storage/unified/resource/access.go @@ -107,7 +107,7 @@ func NewAuthzLimitedClient(client claims.AccessClient, opts AuthzOptions) claims } // Check implements claims.AccessClient. -func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest) (claims.CheckResponse, error) { +func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest, folder string) (claims.CheckResponse, error) { t := time.Now() ctx, span := c.tracer.Start(ctx, "authzLimitedClient.Check", trace.WithAttributes( attribute.String("group", req.Group), @@ -115,7 +115,7 @@ func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req c attribute.String("namespace", req.Namespace), attribute.String("name", req.Name), attribute.String("verb", req.Verb), - attribute.String("folder", req.Folder), + attribute.String("folder", folder), attribute.Bool("fallback_used", FallbackUsed(ctx)), )) defer span.End() @@ -145,7 +145,7 @@ func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req c span.SetAttributes(attribute.Bool("allowed", true)) return claims.CheckResponse{Allowed: true}, nil } - resp, err := c.client.Check(ctx, id, req) + resp, err := c.client.Check(ctx, id, req, folder) if err != nil { c.logger.Error("Check", "group", req.Group, "resource", req.Resource, "error", err, "duration", time.Since(t), "traceid", trace.SpanContextFromContext(ctx).TraceID().String()) c.metrics.errorsTotal.WithLabelValues(req.Group, req.Resource, req.Verb).Inc() diff --git a/pkg/storage/unified/resource/access_test.go b/pkg/storage/unified/resource/access_test.go index e09e40f01c8..5d664aaba74 100644 --- a/pkg/storage/unified/resource/access_test.go +++ b/pkg/storage/unified/resource/access_test.go @@ -34,7 +34,7 @@ func TestAuthzLimitedClient_Check(t *testing.T) { Verb: utils.VerbGet, Namespace: "stacks-1", } - resp, err := client.Check(context.Background(), &identity.StaticRequester{Namespace: "stacks-1"}, req) + resp, err := client.Check(context.Background(), &identity.StaticRequester{Namespace: "stacks-1"}, req, "") assert.NoError(t, err) assert.Equal(t, test.expected, resp.Allowed) } @@ -135,7 +135,7 @@ func TestNamespaceMatching(t *testing.T) { // Create a mock auth info with the specified namespace // Test Check method user := &identity.StaticRequester{Namespace: tt.authNamespace} - _, checkErr := client.Check(ctx, user, checkReq) + _, checkErr := client.Check(ctx, user, checkReq, "") // Test Compile method compileReq := authlib.ListRequest{ @@ -199,7 +199,7 @@ func TestNamespaceMatchingFallback(t *testing.T) { // Create a mock auth info with the specified namespace // Test Check method user := &identity.StaticRequester{Namespace: tt.authNamespace} - _, checkErr := client.Check(ctx, user, checkReq) + _, checkErr := client.Check(ctx, user, checkReq, "") // Test Compile method compileReq := authlib.ListRequest{ diff --git a/pkg/storage/unified/resource/bulk.go b/pkg/storage/unified/resource/bulk.go 
index a0720efa6de..842d6638f5c 100644 --- a/pkg/storage/unified/resource/bulk.go +++ b/pkg/storage/unified/resource/bulk.go @@ -190,7 +190,7 @@ func (s *server) BulkProcess(stream resourcepb.BulkStore_BulkProcessServer) erro Group: k.Group, Resource: k.Resource, Verb: utils.VerbDeleteCollection, - }) + }, "") if err != nil || !rsp.Allowed { return sendAndClose(&resourcepb.BulkResponse{ Error: &resourcepb.ErrorResult{ diff --git a/pkg/storage/unified/resource/errors.go b/pkg/storage/unified/resource/errors.go index 79d3911e470..a0402be1dcb 100644 --- a/pkg/storage/unified/resource/errors.go +++ b/pkg/storage/unified/resource/errors.go @@ -18,8 +18,7 @@ import ( // Package-level errors. var ( - ErrOptimisticLockingFailed = errors.New("optimistic locking failed") - ErrNotImplementedYet = errors.New("not implemented yet") + ErrNotImplementedYet = errors.New("not implemented yet") ) var ( @@ -31,6 +30,12 @@ var ( Code: http.StatusConflict, }, } + + ErrOptimisticLockingFailed = resourcepb.ErrorResult{ + Code: http.StatusConflict, + Reason: "optimistic locking failed", + Message: "requested RV does not match saved RV", + } ) func NewBadRequestError(msg string) *resourcepb.ErrorResult { diff --git a/pkg/storage/unified/resource/server.go b/pkg/storage/unified/resource/server.go index f2613d6a3cf..17c36996ad9 100644 --- a/pkg/storage/unified/resource/server.go +++ b/pkg/storage/unified/resource/server.go @@ -22,7 +22,6 @@ import ( claims "github.com/grafana/authlib/types" "github.com/grafana/dskit/backoff" - "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apimachinery/validation" secrets "github.com/grafana/grafana/pkg/registry/apis/secret/contracts" @@ -558,8 +557,7 @@ func (s *server) newEvent(ctx context.Context, user claims.AuthInfo, key *resour check.Name = key.Name } - check.Folder = obj.GetFolder() - a, err := s.access.Check(ctx, user, check) + a, err := s.access.Check(ctx, user, check, obj.GetFolder()) if err != nil { return nil, AsErrorResult(err) } @@ -596,10 +594,9 @@ func (s *server) checkFolderMovePermissions(ctx context.Context, user claims.Aut Resource: key.Resource, Namespace: key.Namespace, Name: key.Name, - Folder: oldFolder, } - a, err := s.access.Check(ctx, user, updateCheck) + a, err := s.access.Check(ctx, user, updateCheck, oldFolder) if err != nil { return AsErrorResult(err) } @@ -616,10 +613,10 @@ func (s *server) checkFolderMovePermissions(ctx context.Context, user claims.Aut Group: key.Group, Resource: key.Resource, Namespace: key.Namespace, - Folder: newFolder, + Name: key.Name, } - a, err = s.access.Check(ctx, user, createCheck) + a, err = s.access.Check(ctx, user, createCheck, newFolder) if err != nil { return AsErrorResult(err) } @@ -734,8 +731,12 @@ func (s *server) update(ctx context.Context, user claims.AuthInfo, req *resource return rsp, nil } + // TODO: once we know the client is always sending the RV, require ResourceVersion > 0 + // See: https://github.com/grafana/grafana/pull/111866 if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion { - return nil, ErrOptimisticLockingFailed + return &resourcepb.UpdateResponse{ + Error: &ErrOptimisticLockingFailed, + }, nil } event, e := s.newEvent(ctx, user, req.Key, req.Value, latest.Value) @@ -799,7 +800,7 @@ func (s *server) delete(ctx context.Context, user claims.AuthInfo, req *resource return rsp, nil } if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion { - rsp.Error = AsErrorResult(ErrOptimisticLockingFailed) + rsp.Error = 
&ErrOptimisticLockingFailed return rsp, nil } @@ -809,8 +810,7 @@ func (s *server) delete(ctx context.Context, user claims.AuthInfo, req *resource Resource: req.Key.Resource, Namespace: req.Key.Namespace, Name: req.Key.Name, - Folder: latest.Folder, - }) + }, latest.Folder) if err != nil { rsp.Error = AsErrorResult(err) return rsp, nil @@ -876,10 +876,6 @@ func (s *server) Read(ctx context.Context, req *resourcepb.ReadRequest) (*resour }}, nil } - // if req.Key.Group == "" { - // status, _ := AsErrorResult(apierrors.NewBadRequest("missing group")) - // return &ReadResponse{Status: status}, nil - // } if req.Key.Resource == "" { return &resourcepb.ReadResponse{Error: NewBadRequestError("missing resource")}, nil } @@ -912,8 +908,7 @@ func (s *server) read(ctx context.Context, user claims.AuthInfo, req *resourcepb Resource: req.Key.Resource, Namespace: req.Key.Namespace, Name: req.Key.Name, - Folder: rsp.Folder, - }) + }, rsp.Folder) if err != nil { return &resourcepb.ReadResponse{Error: AsErrorResult(err)}, nil } diff --git a/pkg/storage/unified/resource/server_test.go b/pkg/storage/unified/resource/server_test.go index b2f1cd9d585..7e09184d7ad 100644 --- a/pkg/storage/unified/resource/server_test.go +++ b/pkg/storage/unified/resource/server_test.go @@ -477,11 +477,12 @@ func TestSimpleServer(t *testing.T) { ResourceVersion: created.ResourceVersion}) require.NoError(t, err) - _, err = server.Update(ctx, &resourcepb.UpdateRequest{ + rsp, _ := server.Update(ctx, &resourcepb.UpdateRequest{ Key: key, Value: raw, ResourceVersion: created.ResourceVersion}) - require.ErrorIs(t, err, ErrOptimisticLockingFailed) + require.Equal(t, rsp.Error.Code, ErrOptimisticLockingFailed.Code) + require.Equal(t, rsp.Error.Message, ErrOptimisticLockingFailed.Message) }) } diff --git a/pkg/storage/unified/search/bleve_test.go b/pkg/storage/unified/search/bleve_test.go index 62725adda5d..d57196b842d 100644 --- a/pkg/storage/unified/search/bleve_test.go +++ b/pkg/storage/unified/search/bleve_test.go @@ -15,7 +15,6 @@ import ( "time" "github.com/blevesearch/bleve/v2" - authlib "github.com/grafana/authlib/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" @@ -23,6 +22,8 @@ import ( "go.uber.org/atomic" "go.uber.org/goleak" + authlib "github.com/grafana/authlib/types" + "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/infra/log/logtest" @@ -652,7 +653,7 @@ type StubAccessClient struct { resourceResponses map[string]bool // key is the resource name, and bool if what the checker will return } -func (nc *StubAccessClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (nc *StubAccessClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { return authlib.CheckResponse{Allowed: nc.resourceResponses[req.Resource]}, nil } diff --git a/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json b/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json index 5989ecb1804..a77725e2cc2 100644 --- a/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json +++ b/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json @@ -26,7 +26,6 @@ "createdBy": "user:be2g71ke8yoe8b", "fields": { "ds_types": [ - "datasource", "my-custom-plugin" ], "errors_last_1_days": 1, @@ -47,12 +46,6 @@ "kind": 
"DataSource", "name": "DSUID" }, - { - "relation": "depends-on", - "group": "datasource", - "kind": "DataSource", - "name": "grafana" - }, { "relation": "depends-on", "group": "dashboards.grafana.app", diff --git a/pkg/storage/unified/sql/backend.go b/pkg/storage/unified/sql/backend.go index edbbe7760e0..1337bd7c2a9 100644 --- a/pkg/storage/unified/sql/backend.go +++ b/pkg/storage/unified/sql/backend.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/otel/trace/noop" "google.golang.org/protobuf/proto" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "github.com/grafana/grafana/pkg/util/sqlite" @@ -405,15 +406,18 @@ func (b *backend) update(ctx context.Context, event resource.WriteEvent) (int64, // Use rvManager.ExecWithRV instead of direct transaction rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) { // 1. Update resource - _, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ + res, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ SQLTemplate: sqltemplate.New(b.dialect), - WriteEvent: event, + WriteEvent: event, // includes the RV Folder: folder, GUID: event.GUID, }) if err != nil { return event.GUID, fmt.Errorf("resource update: %w", err) } + if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil { + return event.GUID, err + } // 2. Insert into resource history if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{ @@ -460,7 +464,7 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64, } rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) { // 1. delete from resource - _, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{ + res, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{ SQLTemplate: sqltemplate.New(b.dialect), WriteEvent: event, GUID: event.GUID, @@ -468,6 +472,9 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64, if err != nil { return event.GUID, fmt.Errorf("delete resource: %w", err) } + if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil { + return event.GUID, err + } // 2. 
Add event to resource history if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{ @@ -504,6 +511,28 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64, return rv, nil } +func (b *backend) checkConflict(res db.Result, key *resourcepb.ResourceKey, rv int64) error { + if rv == 0 { + return nil + } + + // The RV is part of the update request, and it may no longer be the most recent + rows, err := res.RowsAffected() + if err != nil { + return fmt.Errorf("unable to verify RV: %w", err) + } + if rows == 1 { + return nil // expected one result + } + if rows > 0 { + return fmt.Errorf("multiple rows effected (%d)", rows) + } + return apierrors.NewConflict(schema.GroupResource{ + Group: key.Group, + Resource: key.Resource, + }, key.Name, fmt.Errorf("resource version does not match current value")) +} + func (b *backend) ReadResource(ctx context.Context, req *resourcepb.ReadRequest) *resource.BackendReadResponse { _, span := b.tracer.Start(ctx, tracePrefix+".Read") defer span.End() diff --git a/pkg/storage/unified/sql/backend_test.go b/pkg/storage/unified/sql/backend_test.go index 534b393065e..65e64a76ac2 100644 --- a/pkg/storage/unified/sql/backend_test.go +++ b/pkg/storage/unified/sql/backend_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" - "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git a/pkg/storage/unified/sql/data/resource_delete.sql b/pkg/storage/unified/sql/data/resource_delete.sql index 5a11a97afaf..c621313ac3f 100644 --- a/pkg/storage/unified/sql/data/resource_delete.sql +++ b/pkg/storage/unified/sql/data/resource_delete.sql @@ -5,5 +5,6 @@ DELETE FROM {{ .Ident "resource" }} AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }} {{ if .WriteEvent.Key.Name }} AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }} + AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }} {{ end }} ; diff --git a/pkg/storage/unified/sql/data/resource_update.sql b/pkg/storage/unified/sql/data/resource_update.sql index 59ea5f4cdda..800de1a9fa6 100644 --- a/pkg/storage/unified/sql/data/resource_update.sql +++ b/pkg/storage/unified/sql/data/resource_update.sql @@ -10,4 +10,5 @@ UPDATE {{ .Ident "resource" }} AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }} AND {{ .Ident "namespace" }} = {{ .Arg .WriteEvent.Key.Namespace }} AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }} + AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }} ; diff --git a/pkg/storage/unified/sql/list_iterator_test.go b/pkg/storage/unified/sql/list_iterator_test.go index f9119b91b61..7edafd675a8 100644 --- a/pkg/storage/unified/sql/list_iterator_test.go +++ b/pkg/storage/unified/sql/list_iterator_test.go @@ -87,14 +87,23 @@ func TestIntegrationListIter(t *testing.T) { Group: item.group, Name: item.name, }, - Value: item.value, - PreviousRV: 0, + Value: item.value, }, }) if err != nil { return fmt.Errorf("failed to insert test data: %w", err) } - _, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ + + if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdateRV, sqlResourceUpdateRVRequest{ + SQLTemplate: sqltemplate.New(dialect), + GUIDToRV: map[string]int64{ + item.guid: item.resourceVersion, + }, + }); err != nil { + return fmt.Errorf("failed to insert test data: %w", err) + } + + if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ SQLTemplate: sqltemplate.New(dialect), GUID: item.guid, ResourceVersion: 
item.resourceVersion, @@ -110,8 +119,7 @@ func TestIntegrationListIter(t *testing.T) { PreviousRV: item.resourceVersion, Type: 1, }, - }) - if err != nil { + }); err != nil { return fmt.Errorf("failed to insert resource version: %w", err) } } diff --git a/pkg/storage/unified/sql/queries_test.go b/pkg/storage/unified/sql/queries_test.go index 8cb7a328714..e8fdb31f084 100644 --- a/pkg/storage/unified/sql/queries_test.go +++ b/pkg/storage/unified/sql/queries_test.go @@ -31,6 +31,21 @@ func TestUnifiedStorageQueries(t *testing.T) { }, }, }, + { + Name: "with rv", + Data: &sqlResourceRequest{ + SQLTemplate: mocks.NewTestingSQLTemplate(), + WriteEvent: resource.WriteEvent{ + Key: &resourcepb.ResourceKey{ + Namespace: "nn", + Group: "gg", + Resource: "rr", + Name: "name", + }, + PreviousRV: 1234, + }, + }, + }, }, sqlResourceInsert: { { @@ -63,6 +78,7 @@ func TestUnifiedStorageQueries(t *testing.T) { Resource: "rr", Name: "name", }, + PreviousRV: 1759304090100678, }, Folder: "fldr", }, diff --git a/pkg/storage/unified/sql/rv_manager.go b/pkg/storage/unified/sql/rv_manager.go index 0ffce47b259..4c6a092d937 100644 --- a/pkg/storage/unified/sql/rv_manager.go +++ b/pkg/storage/unified/sql/rv_manager.go @@ -263,7 +263,7 @@ func (m *resourceVersionManager) execBatch(ctx context.Context, group, resource attribute.Int("operation_index", i), attribute.String("error", err.Error()), )) - return fmt.Errorf("failed to execute function: %w", err) + return err } guids[i] = guid } diff --git a/pkg/storage/unified/sql/service.go b/pkg/storage/unified/sql/service.go index 6dd851ad74f..79d4615f53c 100644 --- a/pkg/storage/unified/sql/service.go +++ b/pkg/storage/unified/sql/service.go @@ -6,10 +6,12 @@ import ( "fmt" "hash/fnv" "net" + "net/http" "os" "strconv" "time" + "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.opentelemetry.io/otel" @@ -94,6 +96,7 @@ func ProvideUnifiedStorageGrpcService( indexMetrics *resource.BleveIndexMetrics, searchRing *ring.Ring, memberlistKVConfig kv.Config, + httpServerRouter *mux.Router, ) (UnifiedStorageGrpcService, error) { var err error tracer := otel.Tracer("unified-storage") @@ -159,6 +162,10 @@ func ProvideUnifiedStorageGrpcService( s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true) subservices = append(subservices, s.ringLifecycler) + + if httpServerRouter != nil { + httpServerRouter.Path("/prepare-downscale").Methods("GET", "POST", "DELETE").Handler(http.HandlerFunc(s.PrepareDownscale)) + } } if cfg.QOSEnabled { @@ -194,6 +201,21 @@ func ProvideUnifiedStorageGrpcService( return s, nil } +func (s *service) PrepareDownscale(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodPost: + s.log.Info("Preparing for downscale. Will not keep instance in ring on shutdown.") + s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(false) + case http.MethodDelete: + s.log.Info("Downscale canceled. Will keep instance in ring on shutdown.") + s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true) + case http.MethodGet: + // used for delayed downscale use case, which we don't support. Leaving here for completion sake + s.log.Info("Received GET request for prepare-downscale. 
Behavior not implemented.") + default: + } +} + var ( // operation used by the search-servers to check if they own the namespace searchOwnerRead = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE, ring.LEAVING}, nil) diff --git a/pkg/storage/unified/sql/test/integration_test.go b/pkg/storage/unified/sql/test/integration_test.go index c5420b71280..347a28c4f11 100644 --- a/pkg/storage/unified/sql/test/integration_test.go +++ b/pkg/storage/unified/sql/test/integration_test.go @@ -128,7 +128,7 @@ func TestClientServer(t *testing.T) { features := featuremgmt.WithFeatures() - svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}) + svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil) require.NoError(t, err) var client resourcepb.ResourceStoreClient diff --git a/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql b/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql index a44f4bb6cc7..80f5c76156a 100755 --- a/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql +++ b/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql @@ -4,4 +4,5 @@ DELETE FROM `resource` AND `group` = 'gg' AND `resource` = 'rr' AND `name` = 'name' + AND `resource_version` = 0 ; diff --git a/pkg/storage/unified/sql/testdata/mysql--resource_delete-with rv.sql b/pkg/storage/unified/sql/testdata/mysql--resource_delete-with rv.sql new file mode 100755 index 00000000000..f97a8f2867d --- /dev/null +++ b/pkg/storage/unified/sql/testdata/mysql--resource_delete-with rv.sql @@ -0,0 +1,8 @@ +DELETE FROM `resource` + WHERE 1 = 1 + AND `namespace` = 'nn' + AND `group` = 'gg' + AND `resource` = 'rr' + AND `name` = 'name' + AND `resource_version` = 1234 +; diff --git a/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql b/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql index e54cda2c0d7..e28f089b91c 100755 --- a/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql +++ b/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql @@ -10,4 +10,5 @@ UPDATE `resource` AND `resource` = 'rr' AND `namespace` = 'nn' AND `name` = 'name' + AND `resource_version` = 1759304090100678 ; diff --git a/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql b/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql index f7fb550b42b..3070bdf23f9 100755 --- a/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql +++ b/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql @@ -4,4 +4,5 @@ DELETE FROM "resource" AND "group" = 'gg' AND "resource" = 'rr' AND "name" = 'name' + AND "resource_version" = 0 ; diff --git a/pkg/storage/unified/sql/testdata/postgres--resource_delete-with rv.sql b/pkg/storage/unified/sql/testdata/postgres--resource_delete-with rv.sql new file mode 100755 index 00000000000..da4c4cfe65d --- /dev/null +++ b/pkg/storage/unified/sql/testdata/postgres--resource_delete-with rv.sql @@ -0,0 +1,8 @@ +DELETE FROM "resource" + WHERE 1 = 1 + AND "namespace" = 'nn' + AND "group" = 'gg' + AND "resource" = 'rr' + AND "name" = 'name' + AND "resource_version" = 1234 +; diff --git a/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql b/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql index 2b549ed8925..bedf09f15e0 100755 --- 
a/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql +++ b/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql @@ -10,4 +10,5 @@ UPDATE "resource" AND "resource" = 'rr' AND "namespace" = 'nn' AND "name" = 'name' + AND "resource_version" = 1759304090100678 ; diff --git a/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql index f7fb550b42b..3070bdf23f9 100755 --- a/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql +++ b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql @@ -4,4 +4,5 @@ DELETE FROM "resource" AND "group" = 'gg' AND "resource" = 'rr' AND "name" = 'name' + AND "resource_version" = 0 ; diff --git a/pkg/storage/unified/sql/testdata/sqlite--resource_delete-with rv.sql b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-with rv.sql new file mode 100755 index 00000000000..da4c4cfe65d --- /dev/null +++ b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-with rv.sql @@ -0,0 +1,8 @@ +DELETE FROM "resource" + WHERE 1 = 1 + AND "namespace" = 'nn' + AND "group" = 'gg' + AND "resource" = 'rr' + AND "name" = 'name' + AND "resource_version" = 1234 +; diff --git a/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql b/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql index 2b549ed8925..bedf09f15e0 100755 --- a/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql +++ b/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql @@ -10,4 +10,5 @@ UPDATE "resource" AND "resource" = 'rr' AND "namespace" = 'nn' AND "name" = 'name' + AND "resource_version" = 1759304090100678 ; diff --git a/pkg/storage/unified/testing/server.go b/pkg/storage/unified/testing/server.go index 5916ee3cbb4..a83a7d13564 100644 --- a/pkg/storage/unified/testing/server.go +++ b/pkg/storage/unified/testing/server.go @@ -125,12 +125,15 @@ func runTestResourcePermissionScenarios(t *testing.T, backend resource.StorageBa resourceUID := fmt.Sprintf("test123-%d", i) // Create a mock access client with the test case's permission map - checksPerformed := []types.CheckRequest{} + checksPerformed := []CheckRequestEX{} mockAccess := &mockAccessClient{ allowed: false, // Default to false allowedMap: tc.permissionMap, - checkFn: func(req types.CheckRequest) { - checksPerformed = append(checksPerformed, req) + checkFn: func(req types.CheckRequest, folder string) { + checksPerformed = append(checksPerformed, CheckRequestEX{ + CheckRequest: req, + Folder: folder, + }) }, } @@ -167,7 +170,7 @@ func runTestResourcePermissionScenarios(t *testing.T, backend resource.StorageBa } }`, resourceName, resourceUID, nsPrefix+"-ns1", tc.initialFolder, i) - checksPerformed = []types.CheckRequest{} + checksPerformed = []CheckRequestEX{} created, err := server.Create(ctx, &resourcepb.CreateRequest{ Value: []byte(resourceJSON), Key: key, @@ -232,7 +235,7 @@ func runTestResourcePermissionScenarios(t *testing.T, backend resource.StorageBa }`, resourceName, resourceUID, nsPrefix+"-ns1", tc.targetFolder, i) mockAccess.allowed = false // Reset to use the map - checksPerformed = []types.CheckRequest{} + checksPerformed = []CheckRequestEX{} updated, err := server.Update(ctx, &resourcepb.UpdateRequest{ Key: key, @@ -494,18 +497,18 @@ func runTestListTrashAccessControl(t *testing.T, backend resource.StorageBackend type mockAccessClient struct { allowed bool allowedMap map[string]bool - checkFn 
func(types.CheckRequest) + checkFn func(types.CheckRequest, string) compileFn func(user types.AuthInfo, req types.ListRequest) types.ItemChecker } -func (m *mockAccessClient) Check(ctx context.Context, user types.AuthInfo, req types.CheckRequest) (types.CheckResponse, error) { +func (m *mockAccessClient) Check(ctx context.Context, user types.AuthInfo, req types.CheckRequest, folder string) (types.CheckResponse, error) { if m.checkFn != nil { - m.checkFn(req) + m.checkFn(req, folder) } // Check specific folder:verb mappings if provided if m.allowedMap != nil { - key := fmt.Sprintf("%s:%s", req.Folder, req.Verb) + key := fmt.Sprintf("%s:%s", folder, req.Verb) if allowed, exists := m.allowedMap[key]; exists { return types.CheckResponse{Allowed: allowed}, nil } @@ -526,3 +529,8 @@ func (m *mockAccessClient) Compile(ctx context.Context, user types.AuthInfo, req return m.allowed }, types.NoopZookie{}, nil } + +type CheckRequestEX struct { + types.CheckRequest + Folder string +} diff --git a/pkg/storage/unified/testing/storage_backend.go b/pkg/storage/unified/testing/storage_backend.go index 712d1236475..0dde7344f18 100644 --- a/pkg/storage/unified/testing/storage_backend.go +++ b/pkg/storage/unified/testing/storage_backend.go @@ -124,13 +124,13 @@ func runTestIntegrationBackendHappyPath(t *testing.T, backend resource.StorageBa }) t.Run("Update item2", func(t *testing.T) { - rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2)) require.NoError(t, err) require.Greater(t, rv4, rv3) }) t.Run("Delete item1", func(t *testing.T) { - rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rv5, rv4) }) @@ -352,10 +352,10 @@ func runTestIntegrationBackendList(t *testing.T, backend resource.StorageBackend rv5, err := writeEvent(ctx, backend, "item5", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv5, rv4) - rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2)) require.NoError(t, err) require.Greater(t, rv6, rv5) - rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv3)) require.NoError(t, err) require.Greater(t, rv7, rv6) rv8, err := writeEvent(ctx, backend, "item6", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) @@ -490,10 +490,10 @@ func runTestIntegrationBackendListModifiedSince(t *testing.T, backend resource.S ns := nsPrefix + "-history-ns" rvCreated, _ := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.Greater(t, rvCreated, int64(0)) - rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvCreated)) require.NoError(t, err) require.Greater(t, rvUpdated, rvCreated) - rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDeleted, err := writeEvent(ctx, backend, "item1", 
resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rvUpdated)) require.NoError(t, err) require.Greater(t, rvDeleted, rvUpdated) @@ -610,19 +610,19 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage require.Greater(t, rv1, int64(0)) // add 5 events for item1 - should be saved to history - rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rvHistory1, rv1) - rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory1)) require.NoError(t, err) require.Greater(t, rvHistory2, rvHistory1) - rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory2)) require.NoError(t, err) require.Greater(t, rvHistory3, rvHistory2) - rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory3)) require.NoError(t, err) require.Greater(t, rvHistory4, rvHistory3) - rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory4)) require.NoError(t, err) require.Greater(t, rvHistory5, rvHistory4) @@ -804,8 +804,9 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage resourceVersions = append(resourceVersions, initialRV) // Create 9 more versions with modifications + rv := initialRV for i := 0; i < 9; i++ { - rv, err := writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns2)) + rv, err = writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns2, rv)) require.NoError(t, err) resourceVersions = append(resourceVersions, rv) } @@ -907,7 +908,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage // Create a resource and delete it rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) - rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv)) require.NoError(t, err) require.Greater(t, rvDeleted, rv) @@ -932,7 +933,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage // Create a resource and delete it rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) - rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv)) require.NoError(t, err) require.Greater(t, rvDeleted, rv) @@ -940,7 +941,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage rv1, err := writeEvent(ctx, 
backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv1, rvDeleted) - rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rv2, rv1) @@ -983,8 +984,8 @@ func runTestIntegrationBackendListHistoryErrorReporting(t *testing.T, backend re const events = 500 prevRv := origRv - for i := 0; i < events; i++ { - rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespace(ns), WithGroup(group), WithResource(resourceName)) + for range events { + rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, prevRv), WithGroup(group), WithResource(resourceName)) require.NoError(t, err) require.Greater(t, rv, prevRv) prevRv = rv @@ -1131,6 +1132,14 @@ func runTestIntegrationBackendCreateNewResource(t *testing.T, backend resource.S // WriteEventOption is a function that modifies WriteEventOptions type WriteEventOption func(*WriteEventOptions) +// WithNamespace sets the namespace for the write event +func WithNamespaceAndRV(namespace string, rv int64) WriteEventOption { + return func(o *WriteEventOptions) { + o.Namespace = namespace + o.PreviousRV = rv + } +} + // WithNamespace sets the namespace for the write event func WithNamespace(namespace string) WriteEventOption { return func(o *WriteEventOptions) { @@ -1180,11 +1189,12 @@ func WithValue(value string) WriteEventOption { } type WriteEventOptions struct { - Namespace string - Group string - Resource string - Folder string - Value []byte + Namespace string + Group string + Resource string + Folder string + Value []byte + PreviousRV int64 } func writeEvent(ctx context.Context, store resource.StorageBackend, name string, action resourcepb.WatchEvent_Type, opts ...WriteEventOption) (int64, error) { @@ -1236,6 +1246,7 @@ func writeEvent(ctx context.Context, store resource.StorageBackend, name string, Resource: options.Resource, Name: name, }, + PreviousRV: options.PreviousRV, } switch action { case resourcepb.WatchEvent_DELETED: @@ -1285,18 +1296,15 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken rv1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv1, int64(0)) - rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rvDelete1, rv1) - rvDelete2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) - require.NoError(t, err) - require.Greater(t, rvDelete2, rvDelete1) // item2 deleted and recreated, should not be returned in trash rv2, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv2, int64(0)) - rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv2)) require.NoError(t, err) require.Greater(t, rvDelete3, rv2) rv3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) @@ -1325,10 +1333,10 @@ func 
runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken }, }, }, - expectedVersions: []int64{rvDelete2}, + expectedVersions: []int64{rvDelete1}, expectedValues: []string{"item1 DELETED"}, - minExpectedHeadRV: rvDelete2, - expectedContinueRV: rvDelete2, + minExpectedHeadRV: rvDelete1, + expectedContinueRV: rvDelete1, expectedSortAsc: false, }, { diff --git a/pkg/tests/api/plugins/data/expectedListResp.json b/pkg/tests/api/plugins/data/expectedListResp.json index ae511c7eb77..78d56e06675 100644 --- a/pkg/tests/api/plugins/data/expectedListResp.json +++ b/pkg/tests/api/plugins/data/expectedListResp.json @@ -209,7 +209,7 @@ "path": "public/plugins/grafana-azure-monitor-datasource/img/azure_monitor_cpu.png" } ], - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": [ "azure", @@ -880,7 +880,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -934,7 +934,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": [ "grafana", @@ -1000,7 +1000,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1217,7 +1217,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1325,7 +1325,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1375,7 +1375,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1425,7 +1425,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1629,7 +1629,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": [ "grafana", @@ -1734,12 +1734,12 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, "dependencies": { - "grafanaDependency": "", + "grafanaDependency": "\u003e=11.6.0", "grafanaVersion": "*", "plugins": [], "extensions": { @@ -2042,7 +2042,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -2092,7 +2092,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -2445,7 +2445,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, diff --git a/pkg/tests/apis/dashboard/dashboards_test.go b/pkg/tests/apis/dashboard/dashboards_test.go index 930356e18c1..e6739330286 100644 --- a/pkg/tests/apis/dashboard/dashboards_test.go +++ b/pkg/tests/apis/dashboard/dashboards_test.go @@ -15,6 +15,10 @@ import ( "k8s.io/client-go/dynamic" k8srest "k8s.io/client-go/rest" + dashboardV0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1" + dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1" + dashboardV2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1" + dashboardV2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1" "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/apimachinery/utils" 
"github.com/grafana/grafana/pkg/apiserver/rest" @@ -24,11 +28,6 @@ import ( "github.com/grafana/grafana/pkg/tests/apis" "github.com/grafana/grafana/pkg/tests/testinfra" "github.com/grafana/grafana/pkg/tests/testsuite" - - dashboardV0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1" - dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1" - dashboardV2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1" - dashboardV2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1" "github.com/grafana/grafana/pkg/util/testutil" ) diff --git a/pkg/tests/apis/folder/folders_test.go b/pkg/tests/apis/folder/folders_test.go index 7976f24d547..3449f50372c 100644 --- a/pkg/tests/apis/folder/folders_test.go +++ b/pkg/tests/apis/folder/folders_test.go @@ -10,24 +10,23 @@ import ( "testing" "time" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/grafana/grafana/pkg/expr" - apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" - "github.com/prometheus/common/model" - folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1" "github.com/grafana/grafana/pkg/api/dtos" grafanarest "github.com/grafana/grafana/pkg/apiserver/rest" + "github.com/grafana/grafana/pkg/expr" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/services/accesscontrol/resourcepermissions" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/folder" + apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/setting" diff --git a/pkg/tests/apis/iam/iam_test.go b/pkg/tests/apis/iam/iam_test.go index 5ad2eccd8f4..ffd7dedf77c 100644 --- a/pkg/tests/apis/iam/iam_test.go +++ b/pkg/tests/apis/iam/iam_test.go @@ -69,8 +69,7 @@ func TestIntegrationIdentity(t *testing.T) { "title": "staff", "provisioned": false, "externalUID": "" - }, - "status": {} + } } ] }`, found) diff --git a/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json b/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json index a1d5158e140..9a7b8ef0481 100644 --- a/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json @@ -3502,8 +3502,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -3530,14 +3529,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -3618,66 +3609,11 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator 
ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.Team": { "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -3704,14 +3640,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -3726,8 +3654,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -3754,14 +3681,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -3843,30 +3762,6 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingstatusOperatorState" - } - ] - } - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingTeamRef": { "type": "object", "required": [ @@ -3899,36 +3794,6 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamList": { "type": "object", "required": [ @@ -3999,66 +3864,11 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.User": { "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4085,14 +3895,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -4193,60 +3995,6 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.pkg.apimachinery.apis.common.v0alpha1.Unstructured": { "type": "object", "additionalProperties": true, @@ -4705,8 +4453,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4723,9 +4470,6 @@ "spec": { "description": "Spec is the spec of the CoreRole", "default": {} - }, - "status": { - "default": {} } } }, @@ -4864,8 +4608,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4882,9 +4625,6 @@ "spec": { "description": "Spec is the spec of the GlobalRole", "default": {} - }, - "status": { - "default": {} } } }, @@ -4892,8 +4632,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4910,9 +4649,6 @@ "spec": { "description": "Spec is the spec of the GlobalRoleBinding", "default": {} - }, - "status": { - "default": {} } } }, @@ -5182,8 +4918,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5200,9 +4935,6 @@ "spec": { "description": "Spec is the spec of the ResourcePermission", "default": {} - }, - "status": { - "default": {} } } }, @@ -5353,8 +5085,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5371,9 +5102,6 @@ "spec": { "description": "Spec is the spec of the Role", "default": {} - }, - "status": { - "default": {} } } }, @@ -5381,8 +5109,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5399,9 +5126,6 @@ "spec": { "description": "Spec is the spec of the RoleBinding", "default": {} - }, - "status": { - "default": {} } } }, @@ -5671,8 +5395,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5689,9 +5412,6 @@ "spec": { "description": "Spec is the spec of the ServiceAccount", "default": {} - }, - "status": { - "default": {} } } }, @@ -5801,8 +5521,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5819,9 +5538,6 @@ "spec": { "description": "Spec is the spec of the Team", "default": {} - }, - "status": { - "default": {} } } }, @@ -5829,8 +5545,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5847,9 +5562,6 @@ "spec": { "description": "Spec is the spec of the TeamBinding", "default": {} - }, - "status": { - "default": {} } } }, @@ -6084,8 +5796,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -6102,9 +5813,6 @@ "spec": { "description": "Spec is the spec of the User", "default": {} - }, - "status": { - "default": {} } } }, diff --git a/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json b/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json index c5d87c4088d..1b9b680af34 100644 --- a/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json @@ -43,6 +43,98 @@ ], "description": "list objects of kind Preferences", "operationId": "listPreferences", + "parameters": [ + { + "name": "allowWatchBookmarks", + "in": "query", + "description": "allowWatchBookmarks requests watch events 
with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "continue", + "in": "query", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldSelector", + "in": "query", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "labelSelector", + "in": "query", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "limit", + "in": "query", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "name": "resourceVersion", + "in": "query", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "resourceVersionMatch", + "in": "query", + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "sendInitialEvents", + "in": "query", + "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "timeoutSeconds", + "in": "query", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "name": "watch", + "in": "query", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ], "responses": { "200": { "description": "OK", @@ -82,52 +174,131 @@ "kind": "Preferences" } }, + "post": { + "tags": [ + "Preferences" + ], + "description": "create Preferences", + "operationId": "createPreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldManager", + "in": "query", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldValidation", + "in": "query", + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "201": { + "description": "Created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "202": { + "description": "Accepted", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + 
"application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + } + }, + "x-kubernetes-action": "post", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, "parameters": [ - { - "name": "allowWatchBookmarks", - "in": "query", - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "name": "continue", - "in": "query", - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "fieldSelector", - "in": "query", - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "labelSelector", - "in": "query", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "limit", - "in": "query", - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. 
If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, { "name": "namespace", "in": "path", @@ -146,51 +317,6 @@ "type": "string", "uniqueItems": true } - }, - { - "name": "resourceVersion", - "in": "query", - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "resourceVersionMatch", - "in": "query", - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "sendInitialEvents", - "in": "query", - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "name": "timeoutSeconds", - "in": "query", - "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "name": "watch", - "in": "query", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "schema": { - "type": "boolean", - "uniqueItems": true - } } ] }, @@ -260,6 +386,330 @@ "kind": "Preferences" } }, + "put": { + "tags": [ + "Preferences" + ], + "description": "replace the specified Preferences", + "operationId": "replacePreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldManager", + "in": "query", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldValidation", + "in": "query", + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "201": { + "description": "Created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + } + }, + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, + "delete": { + "tags": [ + "Preferences" + ], + "description": "delete Preferences", + "operationId": "deletePreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "gracePeriodSeconds", + "in": "query", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "name": "ignoreStoreReadErrorWithClusterBreakingPotential", + "in": "query", + "description": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. 
NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "orphanDependents", + "in": "query", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "propagationPolicy", + "in": "query", + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + } + }, + "202": { + "description": "Accepted", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + } + } + }, + "x-kubernetes-action": "delete", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, + "patch": { + "tags": [ + "Preferences" + ], + "description": "partially update the specified Preferences", + "operationId": "updatePreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldManager", + "in": "query", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldValidation", + "in": "query", + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "force", + "in": "query", + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/apply-patch+yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/json-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/merge-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/strategic-merge-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "201": { + "description": "Created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, 
"parameters": [ { "name": "name", @@ -1232,8 +1682,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -1260,14 +1709,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -1412,66 +1853,11 @@ } } }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Stars": { "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -1498,14 +1884,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -1605,60 +1983,6 @@ } } }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { "description": "APIResource specifies the name of a resource and whether it is namespaced.", "type": "object", diff --git a/pkg/tests/apis/preferences/preferences_test.go b/pkg/tests/apis/preferences/preferences_test.go index 01caaae8318..5de09e16fa8 100644 --- a/pkg/tests/apis/preferences/preferences_test.go +++ b/pkg/tests/apis/preferences/preferences_test.go @@ -85,6 +85,8 @@ func TestIntegrationPreferences(t *testing.T) { }, &raw) require.Equal(t, http.StatusOK, legacyResponse.Response.StatusCode, "create preference for user") + adminPrefsName := "user-" + clientAdmin.Args.User.Identity.GetIdentifier() + // Admin has access to all three (namespace, team, and user) rsp, err = clientAdmin.Resource.List(ctx, metav1.ListOptions{}) require.NoError(t, err) @@ -95,9 +97,31 @@ func TestIntegrationPreferences(t *testing.T) { require.Equal(t, []string{ "namespace", fmt.Sprintf("team-%s", helper.Org1.Staff.UID), - fmt.Sprintf("user-%s", clientAdmin.Args.User.Identity.GetIdentifier()), + adminPrefsName, }, names) + obj, err := clientAdmin.Resource.Get(ctx, adminPrefsName, metav1.GetOptions{}) + require.NoError(t, err) + jj, err := json.MarshalIndent(obj.Object["spec"], "", " ") + require.NoError(t, err) + require.JSONEq(t, `{ + "weekStart":"saturday" + }`, string(jj)) + obj.Object["spec"] = map[string]any{ + "weekStart": "saturday", + "regionalFormat": "dd/mm/yyyy", + } + + // Set the regional format via k8s API + obj, err = clientAdmin.Resource.Update(ctx, obj, metav1.UpdateOptions{}) + require.NoError(t, err) + jj, err = json.MarshalIndent(obj.Object["spec"], "", " ") + require.NoError(t, err) + require.JSONEq(t, `{ + "weekStart": "saturday", + "regionalFormat": "dd/mm/yyyy" + }`, string(jj)) + // The viewer should only have namespace (eg org level) permissions rsp, err = clientViewer.Resource.List(ctx, metav1.ListOptions{}) require.NoError(t, err) @@ -118,14 +142,14 @@ func TestIntegrationPreferences(t *testing.T) { }, &shim{}) require.Equal(t, http.StatusOK, bootdata.Response.StatusCode, "get bootdata preferences") - jj, _ := json.Marshal(bootdata.Result.User) + jj, _ = json.Marshal(bootdata.Result.User) require.JSONEq(t, `{ "timezone":"africa", "weekStart":"saturday", "theme":"dark", "language":"en-US", `+ // FROM global default! 
- `"regionalFormat":"" - }`, string(jj)) + `"regionalFormat": ""}`, // why empty? + string(jj)) merged := apis.DoRequest(helper, apis.RequestParams{ User: clientAdmin.Args.User, @@ -133,9 +157,10 @@ func TestIntegrationPreferences(t *testing.T) { Path: "/apis/preferences.grafana.app/v1alpha1/namespaces/default/preferences/merged", }, &preferences.Preferences{}) require.Equal(t, http.StatusOK, merged.Response.StatusCode, "get merged preferences") - require.Equal(t, "saturday", *merged.Result.Spec.WeekStart) // from user - require.Equal(t, "africa", *merged.Result.Spec.Timezone) // from team - require.Equal(t, "dark", *merged.Result.Spec.Theme) // from org - require.Equal(t, "en-US", *merged.Result.Spec.Language) // settings.ini + require.Equal(t, "saturday", *merged.Result.Spec.WeekStart) // from user + require.Equal(t, "africa", *merged.Result.Spec.Timezone) // from team + require.Equal(t, "dark", *merged.Result.Spec.Theme) // from org + require.Equal(t, "en-US", *merged.Result.Spec.Language) // settings.ini + require.Equal(t, "dd/mm/yyyy", *merged.Result.Spec.RegionalFormat) // from user update }) } diff --git a/pkg/tests/testinfra/testinfra.go b/pkg/tests/testinfra/testinfra.go index a4461902fa6..b9f23fb4d44 100644 --- a/pkg/tests/testinfra/testinfra.go +++ b/pkg/tests/testinfra/testinfra.go @@ -129,7 +129,7 @@ func StartGrafanaEnv(t *testing.T, grafDir, cfgPath string) (string, *server.Tes var storage sql.UnifiedStorageGrpcService if runstore { storage, err = sql.ProvideUnifiedStorageGrpcService(env.Cfg, env.FeatureToggles, env.SQLStore, - env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}) + env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil) require.NoError(t, err) ctx := context.Background() err = storage.StartAsync(ctx) diff --git a/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth.go new file mode 100644 index 00000000000..74bc6923eac --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth.go @@ -0,0 +1,117 @@ +package pgx + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "strings" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/log" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" +) + +func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + err := e.Ping(ctx) + if err != nil { + logCheckHealthError(ctx, e.dsInfo, err) + if strings.EqualFold(req.PluginContext.User.Role, "Admin") { + return ErrToHealthCheckResult(err) + } + errResponse := &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: e.TransformQueryError(e.log, err).Error(), + } + return errResponse, nil + } + return &backend.CheckHealthResult{Status: backend.HealthStatusOk, Message: "Database Connection OK"}, nil +} + +// ErrToHealthCheckResult converts error into user friendly health check message +// This should be called with non nil error. 
If the err parameter is empty, we will send Internal Server Error +func ErrToHealthCheckResult(err error) (*backend.CheckHealthResult, error) { + if err == nil { + return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"}, nil + } + res := &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: err.Error()} + details := map[string]string{ + "verboseMessage": err.Error(), + "errorDetailsLink": "https://grafana.com/docs/grafana/latest/datasources/postgres", + } + var opErr *net.OpError + if errors.As(err, &opErr) { + res.Message = "Network error: Failed to connect to the server" + if opErr != nil && opErr.Err != nil { + errMessage := opErr.Err.Error() + if strings.HasSuffix(opErr.Err.Error(), "no such host") { + errMessage = "no such host" + } + if strings.HasSuffix(opErr.Err.Error(), "unknown port") { + errMessage = "unknown port" + } + if strings.HasSuffix(opErr.Err.Error(), "invalid port") { + errMessage = "invalid port" + } + if strings.HasSuffix(opErr.Err.Error(), "missing port in address") { + errMessage = "missing port in address" + } + if strings.HasSuffix(opErr.Err.Error(), "invalid syntax") { + errMessage = "invalid syntax found in the address" + } + res.Message += fmt.Sprintf(". Error message: %s", errMessage) + } + } + + if errors.Is(err, sqleng.ErrParsingPostgresURL) { + res.Message = fmt.Sprintf("Connection string error: %s", sqleng.ErrParsingPostgresURL.Error()) + if unwrappedErr := errors.Unwrap(err); unwrappedErr != nil { + details["verboseMessage"] = unwrappedErr.Error() + } + } + detailBytes, marshalErr := json.Marshal(details) + if marshalErr != nil { + return res, nil + } + res.JSONDetails = detailBytes + return res, nil +} + +func logCheckHealthError(ctx context.Context, dsInfo sqleng.DataSourceInfo, err error) { + logger := log.DefaultLogger.FromContext(ctx) + configSummary := map[string]any{ + "config_url_length": len(dsInfo.URL), + "config_user_length": len(dsInfo.User), + "config_database_length": len(dsInfo.Database), + "config_json_data_database_length": len(dsInfo.JsonData.Database), + "config_max_open_conns": dsInfo.JsonData.MaxOpenConns, + "config_max_idle_conns": dsInfo.JsonData.MaxIdleConns, + "config_conn_max_life_time": dsInfo.JsonData.ConnMaxLifetime, + "config_conn_timeout": dsInfo.JsonData.ConnectionTimeout, + "config_timescaledb": dsInfo.JsonData.Timescaledb, + "config_ssl_mode": dsInfo.JsonData.Mode, + "config_tls_configuration_method": dsInfo.JsonData.ConfigurationMethod, + "config_tls_skip_verify": dsInfo.JsonData.TlsSkipVerify, + "config_timezone": dsInfo.JsonData.Timezone, + "config_time_interval": dsInfo.JsonData.TimeInterval, + "config_enable_secure_proxy": dsInfo.JsonData.SecureDSProxy, + "config_allow_clear_text_passwords": dsInfo.JsonData.AllowCleartextPasswords, + "config_authentication_type": dsInfo.JsonData.AuthenticationType, + "config_ssl_root_cert_file_length": len(dsInfo.JsonData.RootCertFile), + "config_ssl_cert_file_length": len(dsInfo.JsonData.CertFile), + "config_ssl_key_file_length": len(dsInfo.JsonData.CertKeyFile), + "config_encrypt_length": len(dsInfo.JsonData.Encrypt), + "config_server_name_length": len(dsInfo.JsonData.Servername), + "config_password_length": len(dsInfo.DecryptedSecureJSONData["password"]), + "config_tls_ca_cert_length": len(dsInfo.DecryptedSecureJSONData["tlsCACert"]), + "config_tls_client_cert_length": len(dsInfo.DecryptedSecureJSONData["tlsClientCert"]), + "config_tls_client_key_length": len(dsInfo.DecryptedSecureJSONData["tlsClientKey"]), + } + 
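+ // Emit the config summary as structured JSON alongside the error; if marshaling fails, fall back to a plain error log without details.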
configSummaryJSON, marshalError := json.Marshal(configSummary) + if marshalError != nil { + logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error") + return + } + logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error_detailed", "details", string(configSummaryJSON)) +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth_test.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth_test.go new file mode 100644 index 00000000000..16536368622 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth_test.go @@ -0,0 +1,61 @@ +package pgx + +import ( + "errors" + "fmt" + "net" + "testing" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrToHealthCheckResult(t *testing.T) { + tests := []struct { + name string + err error + want *backend.CheckHealthResult + }{ + { + name: "without error", + want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"}, + }, + { + name: "network error", + err: errors.Join(errors.New("foo"), &net.OpError{Op: "read", Net: "tcp", Err: errors.New("some op")}), + want: &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "Network error: Failed to connect to the server. Error message: some op", + JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"foo\nread tcp: some op"}`), + }, + }, + { + name: "regular error", + err: errors.New("internal server error"), + want: &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "internal server error", + JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"internal server error"}`), + }, + }, + { + name: "invalid port specifier error", + err: fmt.Errorf("%w %q: %w", sqleng.ErrParsingPostgresURL, `"foo.bar.co"`, errors.New(`strconv.Atoi: parsing "foo.bar.co": invalid syntax`)), + want: &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "Connection string error: error parsing postgres url", + JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"error parsing postgres url \"\\\"foo.bar.co\\\"\": strconv.Atoi: parsing \"foo.bar.co\": invalid syntax"}`), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ErrToHealthCheckResult(tt.err) + require.Nil(t, err) + assert.Equal(t, string(tt.want.JSONDetails), string(got.JSONDetails)) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_pgx.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine.go similarity index 65% rename from pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_pgx.go rename to pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine.go index e10acb63b1a..8f8af632f13 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_pgx.go +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine.go @@ -1,25 +1,109 @@ -package sqleng +package pgx import ( "context" "encoding/json" "errors" "fmt" + "net" "runtime/debug" + "strconv" "strings" "sync" "time" "github.com/grafana/grafana-plugin-sdk-go/backend" + 
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime" "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" ) -func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer, +// MetaKeyExecutedQueryString is the key where the executed query should get stored +const MetaKeyExecutedQueryString = "executedQueryString" + +// SQLMacroEngine interpolates macros into sql. It takes in the Query to have access to query context and +// timeRange to be able to generate queries that use from and to. +type SQLMacroEngine interface { + Interpolate(query *backend.DataQuery, timeRange backend.TimeRange, sql string) (string, error) +} + +// SqlQueryResultTransformer transforms a query result row to RowValues with proper types. +type SqlQueryResultTransformer interface { + // TransformQueryError transforms a query error. + TransformQueryError(logger log.Logger, err error) error + GetConverterList() []sqlutil.StringConverter +} + +type JsonData struct { + MaxOpenConns int `json:"maxOpenConns"` + MaxIdleConns int `json:"maxIdleConns"` + ConnMaxLifetime int `json:"connMaxLifetime"` + ConnectionTimeout int `json:"connectionTimeout"` + Timescaledb bool `json:"timescaledb"` + Mode string `json:"sslmode"` + ConfigurationMethod string `json:"tlsConfigurationMethod"` + TlsSkipVerify bool `json:"tlsSkipVerify"` + RootCertFile string `json:"sslRootCertFile"` + CertFile string `json:"sslCertFile"` + CertKeyFile string `json:"sslKeyFile"` + Timezone string `json:"timezone"` + Encrypt string `json:"encrypt"` + Servername string `json:"servername"` + TimeInterval string `json:"timeInterval"` + Database string `json:"database"` + SecureDSProxy bool `json:"enableSecureSocksProxy"` + SecureDSProxyUsername string `json:"secureSocksProxyUsername"` + AllowCleartextPasswords bool `json:"allowCleartextPasswords"` + AuthenticationType string `json:"authenticationType"` +} + +type DataPluginConfiguration struct { + DSInfo sqleng.DataSourceInfo + TimeColumnNames []string + MetricColumnTypes []string + RowLimit int64 +} + +type DataSourceHandler struct { + macroEngine SQLMacroEngine + queryResultTransformer SqlQueryResultTransformer + timeColumnNames []string + metricColumnTypes []string + log log.Logger + dsInfo sqleng.DataSourceInfo + rowLimit int64 + userError string + pool *pgxpool.Pool +} + +type QueryJson struct { + RawSql string `json:"rawSql"` + Fill bool `json:"fill"` + FillInterval float64 `json:"fillInterval"` + FillMode string `json:"fillMode"` + FillValue float64 `json:"fillValue"` + Format string `json:"format"` +} + +func (e *DataSourceHandler) TransformQueryError(logger log.Logger, err error) error { + // OpError is the error type usually returned by functions in the net + // package. It describes the operation, network type, and address of + // an error. We log this error rather than return it to the client + // for security purposes. 
+ var opErr *net.OpError + if errors.As(err, &opErr) { + return fmt.Errorf("failed to connect to server - %s", e.userError) + } + + return e.queryResultTransformer.TransformQueryError(logger, err) +} + +func NewQueryDataHandler(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer, macroEngine SQLMacroEngine, log log.Logger) (*DataSourceHandler, error) { queryDataHandler := DataSourceHandler{ queryResultTransformer: queryResultTransformer, @@ -43,7 +127,12 @@ func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, conf return &queryDataHandler, nil } -func (e *DataSourceHandler) DisposePGX() { +type DBDataResponse struct { + dataResponse backend.DataResponse + refID string +} + +func (e *DataSourceHandler) Dispose() { e.log.Debug("Disposing DB...") if e.pool != nil { @@ -53,11 +142,11 @@ func (e *DataSourceHandler) DisposePGX() { e.log.Debug("DB disposed") } -func (e *DataSourceHandler) PingPGX(ctx context.Context) error { +func (e *DataSourceHandler) Ping(ctx context.Context) error { return e.pool.Ping(ctx) } -func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { +func (e *DataSourceHandler) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { result := backend.NewQueryDataResponse() ch := make(chan DBDataResponse, len(req.Queries)) var wg sync.WaitGroup @@ -83,7 +172,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query } wg.Add(1) - go e.executeQueryPGX(ctx, query, &wg, ch, queryjson) + go e.executeQuery(ctx, query, &wg, ch, queryjson) } wg.Wait() @@ -101,7 +190,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query func (e *DataSourceHandler) handleQueryError(frameErr string, err error, query string, source backend.ErrorSource, ch chan DBDataResponse, queryResult DBDataResponse) { var emptyFrame data.Frame emptyFrame.SetMeta(&data.FrameMeta{ExecutedQueryString: query}) - if backend.IsDownstreamError(err) { + if isDownstreamError(err) { source = backend.ErrorSourceDownstream } queryResult.dataResponse.Error = fmt.Errorf("%s: %w", frameErr, err) @@ -127,6 +216,18 @@ func (e *DataSourceHandler) handlePanic(logger log.Logger, queryResult *DBDataRe } } +// Interpolate provides global macros/substitutions for all sql datasources. 
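+// It replaces $__interval, $__interval_ms, $__unixEpochFrom() and $__unixEpochTo() using the query interval and time range.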
+var Interpolate = func(query backend.DataQuery, timeRange backend.TimeRange, timeInterval string, sql string) string { + interval := query.Interval + + sql = strings.ReplaceAll(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10)) + sql = strings.ReplaceAll(sql, "$__interval", gtime.FormatInterval(interval)) + sql = strings.ReplaceAll(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.From.UTC().Unix())) + sql = strings.ReplaceAll(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.To.UTC().Unix())) + + return sql +} + func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgconn.Result, error) { c, err := e.pool.Acquire(ctx) if err != nil { @@ -140,7 +241,7 @@ func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgc return mrr.ReadAll() } -func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup, +func (e *DataSourceHandler) executeQuery(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup, ch chan DBDataResponse, queryJSON QueryJson) { defer wg.Done() queryResult := DBDataResponse{ @@ -171,7 +272,7 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query return } - qm, err := e.newProcessCfgPGX(queryContext, query, results, interpolatedQuery) + qm, err := e.newProcessCfg(queryContext, query, results, interpolatedQuery) if err != nil { e.handleQueryError("failed to get configurations", err, interpolatedQuery, backend.ErrorSourceDownstream, ch, queryResult) return @@ -186,6 +287,47 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query e.processFrame(frame, qm, queryResult, ch, logger) } +// dataQueryFormat is the type of query. +type dataQueryFormat string + +const ( + // dataQueryFormatTable identifies a table query (default). + dataQueryFormatTable dataQueryFormat = "table" + // dataQueryFormatSeries identifies a time series query. 
+ dataQueryFormatSeries dataQueryFormat = "time_series" +) + +type dataQueryModel struct { + InterpolatedQuery string // property not set until after Interpolate() + Format dataQueryFormat + TimeRange backend.TimeRange + FillMissing *data.FillMissing // property not set until after Interpolate() + Interval time.Duration + columnNames []string + columnTypes []string + timeIndex int + timeEndIndex int + metricIndex int + metricPrefix bool + queryContext context.Context +} + +func convertSQLTimeColumnsToEpochMS(frame *data.Frame, qm *dataQueryModel) error { + if qm.timeIndex != -1 { + if err := convertSQLTimeColumnToEpochMS(frame, qm.timeIndex); err != nil { + return fmt.Errorf("%v: %w", "failed to convert time column", err) + } + } + + if qm.timeEndIndex != -1 { + if err := convertSQLTimeColumnToEpochMS(frame, qm.timeEndIndex); err != nil { + return fmt.Errorf("%v: %w", "failed to convert timeend column", err) + } + } + + return nil +} + func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel, queryResult DBDataResponse, ch chan DBDataResponse, logger log.Logger) { if frame.Meta == nil { frame.Meta = &data.FrameMeta{} @@ -281,10 +423,10 @@ func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel, ch <- queryResult } -func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query backend.DataQuery, +func (e *DataSourceHandler) newProcessCfg(queryContext context.Context, query backend.DataQuery, results []*pgconn.Result, interpolatedQuery string) (*dataQueryModel, error) { columnNames := []string{} - columnTypesPGX := []string{} + columnTypes := []string{} // The results will contain column information in the metadata for _, result := range results { @@ -296,26 +438,26 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query // Handle special cases for field types switch field.DataTypeOID { case pgtype.TimetzOID: - columnTypesPGX = append(columnTypesPGX, "timetz") + columnTypes = append(columnTypes, "timetz") case 790: - columnTypesPGX = append(columnTypesPGX, "money") + columnTypes = append(columnTypes, "money") default: - columnTypesPGX = append(columnTypesPGX, "unknown") + columnTypes = append(columnTypes, "unknown") } } else { - columnTypesPGX = append(columnTypesPGX, pqtype.Name) + columnTypes = append(columnTypes, pqtype.Name) } } } qm := &dataQueryModel{ - columnTypesPGX: columnTypesPGX, - columnNames: columnNames, - timeIndex: -1, - timeEndIndex: -1, - metricIndex: -1, - metricPrefix: false, - queryContext: queryContext, + columnTypes: columnTypes, + columnNames: columnNames, + timeIndex: -1, + timeEndIndex: -1, + metricIndex: -1, + metricPrefix: false, + queryContext: queryContext, } queryJSON := QueryJson{} @@ -370,7 +512,7 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query qm.metricIndex = i default: if qm.metricIndex == -1 { - columnType := qm.columnTypesPGX[i] + columnType := qm.columnTypes[i] for _, mct := range e.metricColumnTypes { if columnType == mct { qm.metricIndex = i @@ -596,3 +738,99 @@ func getFieldTypesFromDescriptions(fieldDescriptions []pgconn.FieldDescription, } return fieldTypes, nil } + +// convertSQLTimeColumnToEpochMS converts column named time to unix timestamp in milliseconds +// to make native datetime types and epoch dates work in annotation and table queries. 
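+// Columns already typed as time.Time are left untouched; numeric columns are scaled to milliseconds via epochPrecisionToMS.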
+func convertSQLTimeColumnToEpochMS(frame *data.Frame, timeIndex int) error { + if timeIndex < 0 || timeIndex >= len(frame.Fields) { + return fmt.Errorf("timeIndex %d is out of range", timeIndex) + } + + origin := frame.Fields[timeIndex] + valueType := origin.Type() + if valueType == data.FieldTypeTime || valueType == data.FieldTypeNullableTime { + return nil + } + + newField := data.NewFieldFromFieldType(data.FieldTypeNullableTime, 0) + newField.Name = origin.Name + newField.Labels = origin.Labels + + valueLength := origin.Len() + for i := 0; i < valueLength; i++ { + v, err := origin.NullableFloatAt(i) + if err != nil { + return fmt.Errorf("unable to convert data to a time field") + } + if v == nil { + newField.Append(nil) + } else { + timestamp := time.Unix(0, int64(epochPrecisionToMS(*v))*int64(time.Millisecond)) + newField.Append(&timestamp) + } + } + frame.Fields[timeIndex] = newField + + return nil +} + +// convertSQLValueColumnToFloat converts timeseries value column to float. +func convertSQLValueColumnToFloat(frame *data.Frame, Index int) (*data.Frame, error) { + if Index < 0 || Index >= len(frame.Fields) { + return frame, fmt.Errorf("metricIndex %d is out of range", Index) + } + + origin := frame.Fields[Index] + valueType := origin.Type() + if valueType == data.FieldTypeFloat64 || valueType == data.FieldTypeNullableFloat64 { + return frame, nil + } + + newField := data.NewFieldFromFieldType(data.FieldTypeNullableFloat64, origin.Len()) + newField.Name = origin.Name + newField.Labels = origin.Labels + + for i := 0; i < origin.Len(); i++ { + v, err := origin.NullableFloatAt(i) + if err != nil { + return frame, err + } + newField.Set(i, v) + } + + frame.Fields[Index] = newField + + return frame, nil +} + +// epochPrecisionToMS converts epoch precision to millisecond, if needed.
+// Only seconds to milliseconds supported right now +func epochPrecisionToMS(value float64) float64 { + s := strconv.FormatFloat(value, 'e', -1, 64) + if strings.HasSuffix(s, "e+09") { + return value * float64(1e3) + } + + if strings.HasSuffix(s, "e+18") { + return value / float64(time.Millisecond) + } + + return value +} + +func isDownstreamError(err error) bool { + if backend.IsDownstreamError(err) { + return true + } + resultProcessingDownstreamErrors := []error{ + data.ErrorInputFieldsWithoutRows, + data.ErrorSeriesUnsorted, + data.ErrorNullTimeValues, + } + for _, e := range resultProcessingDownstreamErrors { + if errors.Is(err, e) { + return true + } + } + return false +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine_test.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine_test.go new file mode 100644 index 00000000000..2f161badb7d --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine_test.go @@ -0,0 +1,681 @@ +package pgx + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/data" + "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/grafana-plugin-sdk-go/backend/log" +) + +func Pointer[T any](v T) *T { return &v } + +func TestSQLEngine(t *testing.T) { + dt := time.Date(2018, 3, 14, 21, 20, 6, int(527345*time.Microsecond), time.UTC) + + t.Run("Handle interpolating $__interval and $__interval_ms", func(t *testing.T) { + from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC) + to := from.Add(5 * time.Minute) + timeRange := backend.TimeRange{From: from, To: to} + + text := "$__interval $__timeGroupAlias(time,$__interval) $__interval_ms" + + t.Run("interpolate 10 minutes $__interval", func(t *testing.T) { + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Minute * 10} + sql := Interpolate(query, timeRange, "", text) + require.Equal(t, "10m $__timeGroupAlias(time,10m) 600000", sql) + }) + + t.Run("interpolate 4seconds $__interval", func(t *testing.T) { + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 4} + sql := Interpolate(query, timeRange, "", text) + require.Equal(t, "4s $__timeGroupAlias(time,4s) 4000", sql) + }) + + t.Run("interpolate 200 milliseconds $__interval", func(t *testing.T) { + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Millisecond * 200} + sql := Interpolate(query, timeRange, "", text) + require.Equal(t, "200ms $__timeGroupAlias(time,200ms) 200", sql) + }) + }) + + t.Run("Given a time range between 2018-04-12 00:00 and 2018-04-12 00:05", func(t *testing.T) { + from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC) + to := from.Add(5 * time.Minute) + timeRange := backend.TimeRange{From: from, To: to} + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 60} + + t.Run("interpolate __unixEpochFrom function", func(t *testing.T) { + sql := Interpolate(query, timeRange, "", "select $__unixEpochFrom()") + require.Equal(t, fmt.Sprintf("select %d", from.Unix()), sql) + }) + + t.Run("interpolate __unixEpochTo function", func(t *testing.T) { + sql := Interpolate(query, timeRange, "", "select $__unixEpochTo()") + require.Equal(t, fmt.Sprintf("select %d", to.Unix()), sql) + }) + }) + + t.Run("Given row values with 
int64 as time columns", func(t *testing.T) { + tSeconds := dt.Unix() + tMilliseconds := dt.UnixNano() / 1e6 + tNanoSeconds := dt.UnixNano() + var nilPointer *int64 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []int64{ + tSeconds, + }), + data.NewField("time2", nil, []*int64{ + Pointer(tSeconds), + }), + data.NewField("time3", nil, []int64{ + tMilliseconds, + }), + data.NewField("time4", nil, []*int64{ + Pointer(tMilliseconds), + }), + data.NewField("time5", nil, []int64{ + tNanoSeconds, + }), + data.NewField("time6", nil, []*int64{ + Pointer(tNanoSeconds), + }), + data.NewField("time7", nil, []*int64{ + nilPointer, + }), + ) + + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[6].At(0)) + }) + + t.Run("Given row values with uint64 as time columns", func(t *testing.T) { + tSeconds := uint64(dt.Unix()) + tMilliseconds := uint64(dt.UnixNano() / 1e6) + tNanoSeconds := uint64(dt.UnixNano()) + var nilPointer *uint64 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []uint64{ + tSeconds, + }), + data.NewField("time2", nil, []*uint64{ + Pointer(tSeconds), + }), + data.NewField("time3", nil, []uint64{ + tMilliseconds, + }), + data.NewField("time4", nil, []*uint64{ + Pointer(tMilliseconds), + }), + data.NewField("time5", nil, []uint64{ + tNanoSeconds, + }), + data.NewField("time6", nil, []*uint64{ + Pointer(tNanoSeconds), + }), + data.NewField("time7", nil, []*uint64{ + nilPointer, + }), + ) + + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[6].At(0)) + }) + + t.Run("Given row values with int32 as time columns", func(t *testing.T) { + tSeconds := int32(dt.Unix()) + var nilInt *int32 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []int32{ + tSeconds, + }), + data.NewField("time2", nil, []*int32{ + Pointer(tSeconds), + }), + data.NewField("time7", nil, []*int32{ + nilInt, + }), + ) + for i := 0; i < 3; i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[2].At(0)) + }) + + t.Run("Given row values with uint32 as time columns", func(t *testing.T) { + tSeconds := uint32(dt.Unix()) + var nilInt *uint32 + + 
originFrame := data.NewFrame("", + data.NewField("time1", nil, []uint32{ + tSeconds, + }), + data.NewField("time2", nil, []*uint32{ + Pointer(tSeconds), + }), + data.NewField("time7", nil, []*uint32{ + nilInt, + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[2].At(0)) + }) + + t.Run("Given row values with float64 as time columns", func(t *testing.T) { + tSeconds := float64(dt.UnixNano()) / float64(time.Second) + tMilliseconds := float64(dt.UnixNano()) / float64(time.Millisecond) + tNanoSeconds := float64(dt.UnixNano()) + var nilPointer *float64 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []float64{ + tSeconds, + }), + data.NewField("time2", nil, []*float64{ + Pointer(tSeconds), + }), + data.NewField("time3", nil, []float64{ + tMilliseconds, + }), + data.NewField("time4", nil, []*float64{ + Pointer(tMilliseconds), + }), + data.NewField("time5", nil, []float64{ + tNanoSeconds, + }), + data.NewField("time6", nil, []*float64{ + Pointer(tNanoSeconds), + }), + data.NewField("time7", nil, []*float64{ + nilPointer, + }), + ) + + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[6].At(0)) + }) + + t.Run("Given row values with float32 as time columns", func(t *testing.T) { + tSeconds := float32(dt.Unix()) + var nilInt *float32 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []float32{ + tSeconds, + }), + data.NewField("time2", nil, []*float32{ + Pointer(tSeconds), + }), + data.NewField("time7", nil, []*float32{ + nilInt, + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + require.Equal(t, int64(tSeconds), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, int64(tSeconds), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[2].At(0)) + }) + + t.Run("Given row with value columns, would be converted to float64", func(t *testing.T) { + originFrame := data.NewFrame("", + data.NewField("value1", nil, []int64{ + int64(1), + }), + data.NewField("value2", nil, []*int64{ + Pointer(int64(1)), + }), + data.NewField("value3", nil, []int32{ + int32(1), + }), + data.NewField("value4", nil, []*int32{ + Pointer(int32(1)), + }), + data.NewField("value5", nil, []int16{ + int16(1), + }), + data.NewField("value6", nil, []*int16{ + Pointer(int16(1)), + }), + data.NewField("value7", nil, []int8{ + int8(1), + }), + data.NewField("value8", nil, []*int8{ + Pointer(int8(1)), + }), + data.NewField("value9", nil, []float64{ + float64(1), + }), + data.NewField("value10", nil, []*float64{ + Pointer(1.0), + }), + data.NewField("value11", nil, []float32{ 
+ float32(1), + }), + data.NewField("value12", nil, []*float32{ + Pointer(float32(1)), + }), + data.NewField("value13", nil, []uint64{ + uint64(1), + }), + data.NewField("value14", nil, []*uint64{ + Pointer(uint64(1)), + }), + data.NewField("value15", nil, []uint32{ + uint32(1), + }), + data.NewField("value16", nil, []*uint32{ + Pointer(uint32(1)), + }), + data.NewField("value17", nil, []uint16{ + uint16(1), + }), + data.NewField("value18", nil, []*uint16{ + Pointer(uint16(1)), + }), + data.NewField("value19", nil, []uint8{ + uint8(1), + }), + data.NewField("value20", nil, []*uint8{ + Pointer(uint8(1)), + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + _, err := convertSQLValueColumnToFloat(originFrame, i) + require.NoError(t, err) + if i == 8 { + require.Equal(t, float64(1), originFrame.Fields[i].At(0).(float64)) + } else { + require.NotNil(t, originFrame.Fields[i].At(0).(*float64)) + require.Equal(t, float64(1), *originFrame.Fields[i].At(0).(*float64)) + } + } + }) + + t.Run("Given row with nil value columns", func(t *testing.T) { + var int64NilPointer *int64 + var int32NilPointer *int32 + var int16NilPointer *int16 + var int8NilPointer *int8 + var float64NilPointer *float64 + var float32NilPointer *float32 + var uint64NilPointer *uint64 + var uint32NilPointer *uint32 + var uint16NilPointer *uint16 + var uint8NilPointer *uint8 + + originFrame := data.NewFrame("", + data.NewField("value1", nil, []*int64{ + int64NilPointer, + }), + data.NewField("value2", nil, []*int32{ + int32NilPointer, + }), + data.NewField("value3", nil, []*int16{ + int16NilPointer, + }), + data.NewField("value4", nil, []*int8{ + int8NilPointer, + }), + data.NewField("value5", nil, []*float64{ + float64NilPointer, + }), + data.NewField("value6", nil, []*float32{ + float32NilPointer, + }), + data.NewField("value7", nil, []*uint64{ + uint64NilPointer, + }), + data.NewField("value8", nil, []*uint32{ + uint32NilPointer, + }), + data.NewField("value9", nil, []*uint16{ + uint16NilPointer, + }), + data.NewField("value10", nil, []*uint8{ + uint8NilPointer, + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + t.Run("", func(t *testing.T) { + _, err := convertSQLValueColumnToFloat(originFrame, i) + require.NoError(t, err) + require.Nil(t, originFrame.Fields[i].At(0)) + }) + } + }) + + t.Run("Should not return raw connection errors", func(t *testing.T) { + err := net.OpError{Op: "Dial", Err: fmt.Errorf("inner-error")} + transformer := &testQueryResultTransformer{} + dp := DataSourceHandler{ + log: backend.NewLoggerWith("logger", "test"), + queryResultTransformer: transformer, + } + resultErr := dp.TransformQueryError(dp.log, &err) + assert.False(t, transformer.transformQueryErrorWasCalled) + errorText := resultErr.Error() + assert.NotEqual(t, err, resultErr) + assert.NotContains(t, errorText, "inner-error") + assert.Contains(t, errorText, "failed to connect to server") + }) + + t.Run("Should return non-connection errors unmodified", func(t *testing.T) { + err := fmt.Errorf("normal error") + transformer := &testQueryResultTransformer{} + dp := DataSourceHandler{ + log: backend.NewLoggerWith("logger", "test"), + queryResultTransformer: transformer, + } + resultErr := dp.TransformQueryError(dp.log, err) + assert.True(t, transformer.transformQueryErrorWasCalled) + assert.Equal(t, err, resultErr) + assert.ErrorIs(t, err, resultErr) + }) +} + +func TestConvertResultsToFrame(t *testing.T) { + // Import the pgx packages needed for testing + // These imports are included in the main file but need to be accessible for 
tests + t.Run("convertResultsToFrame with single result", func(t *testing.T) { + // Create mock field descriptions + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + {Name: "name", DataTypeOID: pgtype.TextOID}, + {Name: "value", DataTypeOID: pgtype.Float8OID}, + } + + // Create mock result data + mockRows := [][][]byte{ + {[]byte("1"), []byte("test1"), []byte("10.5")}, + {[]byte("2"), []byte("test2"), []byte("20.7")}, + } + + // Create mock result + result := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + result.CommandTag = pgconn.NewCommandTag("SELECT 2") + + results := []*pgconn.Result{result} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 3, len(frame.Fields)) + require.Equal(t, 2, frame.Rows()) + + // Verify field names + require.Equal(t, "id", frame.Fields[0].Name) + require.Equal(t, "name", frame.Fields[1].Name) + require.Equal(t, "value", frame.Fields[2].Name) + }) + + t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) { + // Create mock field descriptions (same structure for both results) + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + {Name: "name", DataTypeOID: pgtype.TextOID}, + } + + // Create first result + mockRows1 := [][][]byte{ + {[]byte("1"), []byte("test1")}, + {[]byte("2"), []byte("test2")}, + } + result1 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows1, + } + result1.CommandTag = pgconn.NewCommandTag("SELECT 2") + + // Create second result with same structure + mockRows2 := [][][]byte{ + {[]byte("3"), []byte("test3")}, + {[]byte("4"), []byte("test4")}, + } + result2 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows2, + } + result2.CommandTag = pgconn.NewCommandTag("SELECT 2") + + results := []*pgconn.Result{result1, result2} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 2, len(frame.Fields)) + require.Equal(t, 4, frame.Rows()) // Should have rows from both results + + // Verify field names + require.Equal(t, "id", frame.Fields[0].Name) + require.Equal(t, "name", frame.Fields[1].Name) + }) + + t.Run("convertResultsToFrame with row limit", func(t *testing.T) { + // Create mock field descriptions + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + } + + // Create mock result data with 3 rows + mockRows := [][][]byte{ + {[]byte("1")}, + {[]byte("2")}, + {[]byte("3")}, + } + + result := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + result.CommandTag = pgconn.NewCommandTag("SELECT 3") + + results := []*pgconn.Result{result} + + // Set row limit to 2 + frame, err := convertResultsToFrame(results, 2) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 1, len(frame.Fields)) + require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows + + // Should have a notice about the limit + require.NotNil(t, frame.Meta) + require.Len(t, frame.Meta.Notices, 1) + require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") + }) + + t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) { + // Create a non-SELECT result (should be skipped) + nonSelectResult := &pgconn.Result{} + nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1") + + // Create a SELECT result + fieldDescs := []pgconn.FieldDescription{ + 
{Name: "id", DataTypeOID: pgtype.Int4OID}, + } + mockRows := [][][]byte{ + {[]byte("1")}, + } + selectResult := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1") + + results := []*pgconn.Result{nonSelectResult, selectResult} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 1, len(frame.Fields)) + require.Equal(t, 1, frame.Rows()) + }) + + t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) { + // Create only non-SELECT results + result1 := &pgconn.Result{} + result1.CommandTag = pgconn.NewCommandTag("UPDATE 1") + + result2 := &pgconn.Result{} + result2.CommandTag = pgconn.NewCommandTag("INSERT 1") + + results := []*pgconn.Result{result1, result2} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 0, len(frame.Fields)) + require.Equal(t, 0, frame.Rows()) + }) + + t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) { + // Create mock field descriptions (same structure for both results) + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + } + + // Create first result with 3 rows + mockRows1 := [][][]byte{ + {[]byte("1")}, + {[]byte("2")}, + {[]byte("3")}, + } + result1 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows1, + } + result1.CommandTag = pgconn.NewCommandTag("SELECT 3") + + // Create second result with 3 rows + mockRows2 := [][][]byte{ + {[]byte("4")}, + {[]byte("5")}, + {[]byte("6")}, + } + result2 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows2, + } + result2.CommandTag = pgconn.NewCommandTag("SELECT 3") + + results := []*pgconn.Result{result1, result2} + + // Set row limit to 2 (should limit each result to 2 rows) + frame, err := convertResultsToFrame(results, 2) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 1, len(frame.Fields)) + require.Equal(t, 4, frame.Rows()) // 2 rows from each result + + // Should have notices about the limit from both results + require.NotNil(t, frame.Meta) + require.Len(t, frame.Meta.Notices, 2) + require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") + require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2") + }) + + t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) { + // Create mock field descriptions + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + {Name: "name", DataTypeOID: pgtype.TextOID}, + } + + // Create mock result data with null values + mockRows := [][][]byte{ + {[]byte("1"), nil}, // null name + {nil, []byte("test2")}, // null id + } + + result := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + result.CommandTag = pgconn.NewCommandTag("SELECT 2") + + results := []*pgconn.Result{result} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 2, len(frame.Fields)) + require.Equal(t, 2, frame.Rows()) + + // Check that null values are handled correctly + // The exact representation depends on the field type, but should not panic + require.NotPanics(t, func() { + frame.Fields[0].At(1) // null id + frame.Fields[1].At(0) // null name + }) + }) +} + +type testQueryResultTransformer struct { + transformQueryErrorWasCalled bool +} + 
+func (t *testQueryResultTransformer) TransformQueryError(_ log.Logger, err error) error { + t.transformQueryErrorWasCalled = true + return err +} + +func (t *testQueryResultTransformer) GetConverterList() []sqlutil.StringConverter { + return nil +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres.go b/pkg/tsdb/grafana-postgresql-datasource/postgres.go index a889ef662a7..ca4b4eb5565 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres.go @@ -16,56 +16,14 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" "github.com/grafana/grafana/pkg/services/featuremgmt" - "github.com/grafana/grafana/pkg/setting" "github.com/jackc/pgx/v5/pgxpool" "github.com/lib/pq" "github.com/grafana/grafana-plugin-sdk-go/backend/log" + sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx" "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" ) -func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service { - logger := backend.NewLoggerWith("logger", "tsdb.postgres") - s := &Service{ - tlsManager: newTLSManager(logger, cfg.DataPath), - pgxTlsManager: newPgxTlsManager(logger), - logger: logger, - features: features, - } - s.im = datasource.NewInstanceManager(s.newInstanceSettings()) - return s -} - -type Service struct { - tlsManager tlsSettingsProvider - pgxTlsManager *pgxTlsManager - im instancemgmt.InstanceManager - logger log.Logger - features featuremgmt.FeatureToggles -} - -func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) { - i, err := s.im.Get(ctx, pluginCtx) - if err != nil { - return nil, err - } - instance := i.(*sqleng.DataSourceHandler) - return instance, nil -} - -func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { - dsInfo, err := s.getDSInfo(ctx, req.PluginContext) - if err != nil { - return nil, err - } - - if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { - return dsInfo.QueryDataPGX(ctx, req) - } - - return dsInfo.QueryData(ctx, req) -} - func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*sql.DB, *sqleng.DataSourceHandler, error) { connector, err := pq.NewConnector(cnnstr) if err != nil { @@ -115,7 +73,7 @@ func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit in return db, handler, nil } -func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqleng.DataSourceHandler, error) { +func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqlengpgx.DataSourceHandler, error) { pgxConf, err := pgxpool.ParseConfig(cnnstr) if err != nil { logger.Error("postgres config creation failed", "error", err) @@ -144,7 +102,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit return []string{host}, nil } - config := sqleng.DataPluginConfiguration{ + config := sqlengpgx.DataPluginConfiguration{ DSInfo: dsInfo, MetricColumnTypes: []string{"unknown", "text", 
"varchar", "char", "bpchar"}, RowLimit: rowLimit, @@ -160,7 +118,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit return nil, nil, err } - handler, err := sqleng.NewQueryDataHandlerPGX(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb), + handler, err := sqlengpgx.NewQueryDataHandler(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb), logger) if err != nil { logger.Error("Failed connecting to Postgres", "err", err) @@ -171,8 +129,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit return p, handler, nil } -func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc { - logger := s.logger +func NewInstanceSettings(logger log.Logger, features featuremgmt.FeatureToggles, dataPath string) datasource.InstanceFactoryFunc { return func(ctx context.Context, settings backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { cfg := backend.GrafanaConfigFromContext(ctx) sqlCfg, err := cfg.SQL() @@ -210,49 +167,53 @@ func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc { DecryptedSecureJSONData: settings.DecryptedSecureJSONData, } - isPGX := s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) + isPGX := features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) userFacingDefaultError, err := cfg.UserFacingDefaultError() if err != nil { return nil, err } - var handler instancemgmt.Instance if isPGX { - pgxTlsSettings, err := s.pgxTlsManager.getTLSSettings(dsInfo) + pgxlogger := logger.FromContext(ctx).With("driver", "pgx") + pgxTlsManager := newPgxTlsManager(pgxlogger) + pgxTlsSettings, err := pgxTlsManager.getTLSSettings(dsInfo) if err != nil { return "", err } // Ensure cleanupCertFiles is called after the connection is opened - defer s.pgxTlsManager.cleanupCertFiles(pgxTlsSettings) - cnnstr, err := s.generateConnectionString(dsInfo, pgxTlsSettings, isPGX) + defer pgxTlsManager.cleanupCertFiles(pgxTlsSettings) + cnnstr, err := generateConnectionString(dsInfo, pgxTlsSettings, isPGX, pgxlogger) if err != nil { return "", err } - _, handler, err = newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings) + _, handler, err := newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pgxlogger, settings) if err != nil { - logger.Error("Failed connecting to Postgres", "err", err) + pgxlogger.Error("Failed connecting to Postgres", "err", err) return nil, err } + pgxlogger.Debug("Successfully connected to Postgres") + return handler, nil } else { - tlsSettings, err := s.tlsManager.getTLSSettings(dsInfo) + pqlogger := logger.FromContext(ctx).With("driver", "libpq") + tlsManager := newTLSManager(pqlogger, dataPath) + tlsSettings, err := tlsManager.getTLSSettings(dsInfo) if err != nil { return "", err } - cnnstr, err := s.generateConnectionString(dsInfo, tlsSettings, isPGX) + cnnstr, err := generateConnectionString(dsInfo, tlsSettings, isPGX, pqlogger) if err != nil { return nil, err } - _, handler, err = newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings) + _, handler, err := newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pqlogger, settings) if err != nil { - logger.Error("Failed connecting to Postgres", "err", err) + pqlogger.Error("Failed connecting to Postgres", "err", err) return nil, err } + pqlogger.Debug("Successfully connected to Postgres") + 
return handler, nil } - - logger.Debug("Successfully connected to Postgres") - return handler, nil } } @@ -342,9 +303,7 @@ func buildBaseConnectionString(params connectionParams) string { return connStr } -func (s *Service) generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool) (string, error) { - logger := s.logger - +func generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool, logger log.Logger) (string, error) { params, err := parseConnectionParams(dsInfo, logger) if err != nil { return "", err @@ -387,15 +346,6 @@ func (t *postgresQueryResultTransformer) TransformQueryError(_ log.Logger, err e return err } -// CheckHealth pings the connected SQL database -func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { - dsHandler, err := s.getDSInfo(ctx, req.PluginContext) - if err != nil { - return sqleng.ErrToHealthCheckResult(err) - } - return dsHandler.CheckHealth(ctx, req, s.features) -} - func (t *postgresQueryResultTransformer) GetConverterList() []sqlutil.StringConverter { return []sqlutil.StringConverter{ { diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go index 7988a3b8515..f65267dbb2f 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go @@ -186,7 +186,7 @@ func TestIntegrationPostgresPGXSnapshots(t *testing.T) { query := makeQuery(rawSQL, test.format) - result, err := handler.QueryDataPGX(context.Background(), &query) + result, err := handler.QueryData(context.Background(), &query) require.Len(t, result.Responses, 1) response, found := result.Responses["A"] require.True(t, found) diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go index 6e14282d824..7c66bde7580 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go @@ -151,10 +151,6 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) { } for _, tt := range testCases { t.Run(tt.desc, func(t *testing.T) { - svc := Service{ - logger: backend.NewLoggerWith("logger", "tsdb.postgres"), - } - ds := sqleng.DataSourceInfo{ URL: tt.host, User: tt.user, @@ -162,8 +158,9 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) { Database: tt.database, UID: tt.uid, } + logger := backend.NewLoggerWith("logger", "tsdb.postgres") - connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false) + connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger) if tt.expErr == "" { require.NoError(t, err, tt.desc) @@ -284,7 +281,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -383,7 +380,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -426,7 +423,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := 
exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -460,7 +457,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] frames := queryResult.Frames @@ -488,7 +485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -542,7 +539,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -589,7 +586,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -624,7 +621,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -741,7 +738,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -765,7 +762,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -789,7 +786,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -813,7 +810,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -837,7 +834,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -861,7 +858,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -885,7 +882,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -910,7 +907,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) 
require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -934,7 +931,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -959,7 +956,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -991,7 +988,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1026,7 +1023,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1086,7 +1083,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["Deploys"] @@ -1113,7 +1110,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["Tickets"] @@ -1136,7 +1133,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1161,7 +1158,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1186,7 +1183,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1212,7 +1209,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1238,7 +1235,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1264,7 +1261,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1290,7 +1287,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] 
require.NoError(t, queryResult.Error) @@ -1338,7 +1335,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := handler.QueryDataPGX(t.Context(), query) + resp, err := handler.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1368,7 +1365,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := handler.QueryDataPGX(t.Context(), query) + resp, err := handler.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1406,7 +1403,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] @@ -1453,7 +1450,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic and should work correctly - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1488,7 +1485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic anymore, but should return an error instead - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] @@ -1517,7 +1514,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic anymore, but should return an error instead - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] @@ -1546,7 +1543,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_service.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_service.go new file mode 100644 index 00000000000..5a1050ba3a5 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_service.go @@ -0,0 +1,80 @@ +package postgres + +import ( + "context" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" + "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" + + "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/setting" + sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" +) + +type Service struct { + im instancemgmt.InstanceManager + features featuremgmt.FeatureToggles +} + +func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service { + logger := backend.NewLoggerWith("logger", "tsdb.postgres") + s := &Service{ + im: datasource.NewInstanceManager(NewInstanceSettings(logger, features, cfg.DataPath)), + features: features, + } + return s +} + +// NOTE: do not put any business logic into this method. 
its whole job is to forward the call "inside" +func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { + dsHandler, err := s.getDSInfoPGX(ctx, req.PluginContext) + if err != nil { + return sqlengpgx.ErrToHealthCheckResult(err) + } + return dsHandler.CheckHealth(ctx, req) + } else { + dsHandler, err := s.getDSInfo(ctx, req.PluginContext) + if err != nil { + return sqleng.ErrToHealthCheckResult(err) + } + return dsHandler.CheckHealth(ctx, req) + } +} + +// NOTE: do not put any business logic into this method. its whole job is to forward the call "inside" +func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { + if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { + dsInfo, err := s.getDSInfoPGX(ctx, req.PluginContext) + if err != nil { + return nil, err + } + return dsInfo.QueryData(ctx, req) + } else { + dsInfo, err := s.getDSInfo(ctx, req.PluginContext) + if err != nil { + return nil, err + } + return dsInfo.QueryData(ctx, req) + } +} + +func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) { + i, err := s.im.Get(ctx, pluginCtx) + if err != nil { + return nil, err + } + instance := i.(*sqleng.DataSourceHandler) + return instance, nil +} + +func (s *Service) getDSInfoPGX(ctx context.Context, pluginCtx backend.PluginContext) (*sqlengpgx.DataSourceHandler, error) { + i, err := s.im.Get(ctx, pluginCtx) + if err != nil { + return nil, err + } + instance := i.(*sqlengpgx.DataSourceHandler) + return instance, nil +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go index b71bd23b387..7d6e99fcb92 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go @@ -156,10 +156,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) { } for _, tt := range testCases { t.Run(tt.desc, func(t *testing.T) { - svc := Service{ - tlsManager: &tlsTestManager{settings: tt.tlsSettings}, - logger: backend.NewLoggerWith("logger", "tsdb.postgres"), - } + logger := backend.NewLoggerWith("logger", "tsdb.postgres") ds := sqleng.DataSourceInfo{ URL: tt.host, @@ -169,7 +166,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) { UID: tt.uid, } - connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false) + connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger) if tt.expErr == "" { require.NoError(t, err, tt.desc) @@ -1409,14 +1406,6 @@ func genTimeRangeByInterval(from time.Time, duration time.Duration, interval tim return timeRange } -type tlsTestManager struct { - settings tlsSettings -} - -func (m *tlsTestManager) getTLSSettings(dsInfo sqleng.DataSourceInfo) (tlsSettings, error) { - return m.settings, nil -} - func isTestDbPostgres() bool { if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present { return db == "postgres" diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go b/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go index 99d36aaaa85..224d00eace5 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go +++ b/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go @@ -10,17 +10,11 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/log" - "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/lib/pq" ) -func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest, features featuremgmt.FeatureToggles) (*backend.CheckHealthResult, error) { - var err error - if features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { - err = e.PingPGX(ctx) - } else { - err = e.Ping() - } +func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + err := e.Ping() if err != nil { logCheckHealthError(ctx, e.dsInfo, err) if strings.EqualFold(req.PluginContext.User.Role, "Admin") { diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go index ca3a9dff36c..fdaac59c6f1 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go +++ b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go @@ -19,7 +19,6 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" - "github.com/jackc/pgx/v5/pgxpool" ) // MetaKeyExecutedQueryString is the key where the executed query should get stored @@ -89,7 +88,6 @@ type DataSourceHandler struct { dsInfo DataSourceInfo rowLimit int64 userError string - pool *pgxpool.Pool } type QueryJson struct { @@ -490,7 +488,6 @@ type dataQueryModel struct { Interval time.Duration columnNames []string columnTypes []*sql.ColumnType - columnTypesPGX []string timeIndex int timeEndIndex int metricIndex int diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go index f0f5f5b7a9b..4d511a53f25 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go @@ -9,8 +9,6 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" - "github.com/jackc/pgx/v5/pgconn" - "github.com/jackc/pgx/v5/pgtype" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -427,246 +425,6 @@ func TestSQLEngine(t *testing.T) { }) } -func TestConvertResultsToFrame(t *testing.T) { - // Import the pgx packages needed for testing - // These imports are included in the main file but need to be accessible for tests - t.Run("convertResultsToFrame with single result", func(t *testing.T) { - // Create mock field descriptions - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - {Name: "name", DataTypeOID: pgtype.TextOID}, - {Name: "value", DataTypeOID: pgtype.Float8OID}, - } - - // Create mock result data - mockRows := [][][]byte{ - {[]byte("1"), []byte("test1"), []byte("10.5")}, - {[]byte("2"), []byte("test2"), []byte("20.7")}, - } - - // Create mock result - result := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - result.CommandTag = pgconn.NewCommandTag("SELECT 2") - - results := []*pgconn.Result{result} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 3, len(frame.Fields)) - require.Equal(t, 2, frame.Rows()) - - // Verify field names - require.Equal(t, "id", frame.Fields[0].Name) - require.Equal(t, "name", frame.Fields[1].Name) - require.Equal(t, 
"value", frame.Fields[2].Name) - }) - - t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) { - // Create mock field descriptions (same structure for both results) - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - {Name: "name", DataTypeOID: pgtype.TextOID}, - } - - // Create first result - mockRows1 := [][][]byte{ - {[]byte("1"), []byte("test1")}, - {[]byte("2"), []byte("test2")}, - } - result1 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows1, - } - result1.CommandTag = pgconn.NewCommandTag("SELECT 2") - - // Create second result with same structure - mockRows2 := [][][]byte{ - {[]byte("3"), []byte("test3")}, - {[]byte("4"), []byte("test4")}, - } - result2 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows2, - } - result2.CommandTag = pgconn.NewCommandTag("SELECT 2") - - results := []*pgconn.Result{result1, result2} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 2, len(frame.Fields)) - require.Equal(t, 4, frame.Rows()) // Should have rows from both results - - // Verify field names - require.Equal(t, "id", frame.Fields[0].Name) - require.Equal(t, "name", frame.Fields[1].Name) - }) - - t.Run("convertResultsToFrame with row limit", func(t *testing.T) { - // Create mock field descriptions - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - } - - // Create mock result data with 3 rows - mockRows := [][][]byte{ - {[]byte("1")}, - {[]byte("2")}, - {[]byte("3")}, - } - - result := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - result.CommandTag = pgconn.NewCommandTag("SELECT 3") - - results := []*pgconn.Result{result} - - // Set row limit to 2 - frame, err := convertResultsToFrame(results, 2) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 1, len(frame.Fields)) - require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows - - // Should have a notice about the limit - require.NotNil(t, frame.Meta) - require.Len(t, frame.Meta.Notices, 1) - require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") - }) - - t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) { - // Create a non-SELECT result (should be skipped) - nonSelectResult := &pgconn.Result{} - nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1") - - // Create a SELECT result - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - } - mockRows := [][][]byte{ - {[]byte("1")}, - } - selectResult := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1") - - results := []*pgconn.Result{nonSelectResult, selectResult} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 1, len(frame.Fields)) - require.Equal(t, 1, frame.Rows()) - }) - - t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) { - // Create only non-SELECT results - result1 := &pgconn.Result{} - result1.CommandTag = pgconn.NewCommandTag("UPDATE 1") - - result2 := &pgconn.Result{} - result2.CommandTag = pgconn.NewCommandTag("INSERT 1") - - results := []*pgconn.Result{result1, result2} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 0, len(frame.Fields)) - 
require.Equal(t, 0, frame.Rows()) - }) - - t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) { - // Create mock field descriptions (same structure for both results) - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - } - - // Create first result with 3 rows - mockRows1 := [][][]byte{ - {[]byte("1")}, - {[]byte("2")}, - {[]byte("3")}, - } - result1 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows1, - } - result1.CommandTag = pgconn.NewCommandTag("SELECT 3") - - // Create second result with 3 rows - mockRows2 := [][][]byte{ - {[]byte("4")}, - {[]byte("5")}, - {[]byte("6")}, - } - result2 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows2, - } - result2.CommandTag = pgconn.NewCommandTag("SELECT 3") - - results := []*pgconn.Result{result1, result2} - - // Set row limit to 2 (should limit each result to 2 rows) - frame, err := convertResultsToFrame(results, 2) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 1, len(frame.Fields)) - require.Equal(t, 4, frame.Rows()) // 2 rows from each result - - // Should have notices about the limit from both results - require.NotNil(t, frame.Meta) - require.Len(t, frame.Meta.Notices, 2) - require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") - require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2") - }) - - t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) { - // Create mock field descriptions - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - {Name: "name", DataTypeOID: pgtype.TextOID}, - } - - // Create mock result data with null values - mockRows := [][][]byte{ - {[]byte("1"), nil}, // null name - {nil, []byte("test2")}, // null id - } - - result := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - result.CommandTag = pgconn.NewCommandTag("SELECT 2") - - results := []*pgconn.Result{result} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 2, len(frame.Fields)) - require.Equal(t, 2, frame.Rows()) - - // Check that null values are handled correctly - // The exact representation depends on the field type, but should not panic - require.NotPanics(t, func() { - frame.Fields[0].At(1) // null id - frame.Fields[1].At(0) // null name - }) - }) -} - type testQueryResultTransformer struct { transformQueryErrorWasCalled bool } diff --git a/pkg/tsdb/grafana-postgresql-datasource/standalone/main.go b/pkg/tsdb/grafana-postgresql-datasource/standalone/main.go new file mode 100644 index 00000000000..c13adbb7825 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/standalone/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "os" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" + "github.com/grafana/grafana-plugin-sdk-go/backend/log" + "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/setting" + postgres "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource" +) + +func main() { + // No need to pass logger name, it will be set by the plugin SDK + logger := backend.NewLoggerWith() + // TODO: get rid of setting.NewCfg() and featuremgmt.FeatureToggles once PostgresDSUsePGX is removed + cfg := setting.NewCfg() + // We want to enable the feature toggle for api server + features := 
featuremgmt.WithFeatures(featuremgmt.FlagPostgresDSUsePGX) + if err := datasource.Manage("grafana-postgresql-datasource", postgres.NewInstanceSettings(logger, features, cfg.DataPath), datasource.ManageOpts{}); err != nil { + log.DefaultLogger.Error(err.Error()) + os.Exit(1) + } +} diff --git a/pkg/tsdb/graphite/resource_handler.go b/pkg/tsdb/graphite/resource_handler.go index 2651c320d65..4a3fe29282c 100644 --- a/pkg/tsdb/graphite/resource_handler.go +++ b/pkg/tsdb/graphite/resource_handler.go @@ -294,7 +294,13 @@ func (s *Service) handleFunctions(ctx context.Context, dsInfo *datasourceInfo, _ _, rawBody, statusCode, err := doGraphiteRequest[map[string]any](ctx, dsInfo, s.logger, req, true) if err != nil { - return nil, statusCode, fmt.Errorf("version request failed: %v", err) + return nil, statusCode, fmt.Errorf("functions request failed: %v", err) + } + + // It's possible that a HTML response may be returned + // This isn't valid so we'll return an error and use the default functions + if strings.HasPrefix(string(*rawBody), "<") { + return []byte{}, http.StatusNotAcceptable, fmt.Errorf("invalid functions response received from Graphite") } if rawBody == nil { diff --git a/pkg/tsdb/graphite/resource_handler_test.go b/pkg/tsdb/graphite/resource_handler_test.go index 66777f95072..c8ee9da1293 100644 --- a/pkg/tsdb/graphite/resource_handler_test.go +++ b/pkg/tsdb/graphite/resource_handler_test.go @@ -735,21 +735,41 @@ func TestHandleFunctions(t *testing.T) { responseBody: `{"error": "internal error"}`, statusCode: 500, expectError: true, - errorContains: "version request failed", + errorContains: "functions request failed", }, { name: "functions request not found", responseBody: `{"error": "not found"}`, statusCode: 404, expectError: true, - errorContains: "version request failed", + errorContains: "functions request failed", }, { name: "network error", responseBody: "", statusCode: 0, expectError: true, - errorContains: "version request failed", + errorContains: "functions request failed", + }, + { + name: "html response", + responseBody: ` + + Graphite Browser + + + + + + + + + + + `, + statusCode: 200, + expectError: true, + errorContains: "invalid functions response received from Graphite", }, } diff --git a/pkg/util/sqlite/sqlite_nocgo.go b/pkg/util/sqlite/sqlite_nocgo.go index d4bf68f0227..f283f858d32 100644 --- a/pkg/util/sqlite/sqlite_nocgo.go +++ b/pkg/util/sqlite/sqlite_nocgo.go @@ -67,7 +67,7 @@ func convertSQLite3URL(dsn string) (string, error) { newDSN := dsn[:pos] q := url.Values{} - q.Add("_pragma", "busy_timeout(5000)") + q.Add("_pragma", "busy_timeout(7500)") // Default of mattn/go-sqlite3 is 5s but we increase it to 7.5s to try and avoid busy errors. 
for key, values := range params { if alias, ok := dsnAlias[strings.ToLower(key)]; ok { diff --git a/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts b/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts index ac0a34e9af5..2ea0fd00730 100644 --- a/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts +++ b/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts @@ -14,12 +14,12 @@ const injectedRtkApi = api query: (queryArg) => ({ url: `/preferences`, params: { + pretty: queryArg.pretty, allowWatchBookmarks: queryArg.allowWatchBookmarks, continue: queryArg['continue'], fieldSelector: queryArg.fieldSelector, labelSelector: queryArg.labelSelector, limit: queryArg.limit, - pretty: queryArg.pretty, resourceVersion: queryArg.resourceVersion, resourceVersionMatch: queryArg.resourceVersionMatch, sendInitialEvents: queryArg.sendInitialEvents, @@ -29,6 +29,20 @@ const injectedRtkApi = api }), providesTags: ['Preferences'], }), + createPreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences`, + method: 'POST', + body: queryArg.preferences, + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + fieldManager: queryArg.fieldManager, + fieldValidation: queryArg.fieldValidation, + }, + }), + invalidatesTags: ['Preferences'], + }), mergedPreferences: build.query({ query: () => ({ url: `/preferences/merged` }), providesTags: ['Preferences'], @@ -42,6 +56,50 @@ const injectedRtkApi = api }), providesTags: ['Preferences'], }), + replacePreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences/${queryArg.name}`, + method: 'PUT', + body: queryArg.preferences, + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + fieldManager: queryArg.fieldManager, + fieldValidation: queryArg.fieldValidation, + }, + }), + invalidatesTags: ['Preferences'], + }), + deletePreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences/${queryArg.name}`, + method: 'DELETE', + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + gracePeriodSeconds: queryArg.gracePeriodSeconds, + ignoreStoreReadErrorWithClusterBreakingPotential: queryArg.ignoreStoreReadErrorWithClusterBreakingPotential, + orphanDependents: queryArg.orphanDependents, + propagationPolicy: queryArg.propagationPolicy, + }, + }), + invalidatesTags: ['Preferences'], + }), + updatePreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences/${queryArg.name}`, + method: 'PATCH', + body: queryArg.patch, + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + fieldManager: queryArg.fieldManager, + fieldValidation: queryArg.fieldValidation, + force: queryArg.force, + }, + }), + invalidatesTags: ['Preferences'], + }), listStars: build.query({ query: (queryArg) => ({ url: `/stars`, @@ -173,6 +231,8 @@ export type GetApiResourcesApiResponse = /** status 200 OK */ ApiResourceList; export type GetApiResourcesApiArg = void; export type ListPreferencesApiResponse = /** status 200 OK */ PreferencesList; export type ListPreferencesApiArg = { + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; /** allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. */ allowWatchBookmarks?: boolean; /** The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". @@ -187,8 +247,6 @@ export type ListPreferencesApiArg = { The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. */ limit?: number; - /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ - pretty?: string; /** resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset */ @@ -216,6 +274,21 @@ export type ListPreferencesApiArg = { /** Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. */ watch?: boolean; }; +export type CreatePreferencesApiResponse = /** status 200 OK */ + | Preferences + | /** status 201 Created */ Preferences + | /** status 202 Accepted */ Preferences; +export type CreatePreferencesApiArg = { + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. 
*/ + fieldManager?: string; + /** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */ + fieldValidation?: string; + preferences: Preferences; +}; export type MergedPreferencesApiResponse = /** status 200 undefined */ any; export type MergedPreferencesApiArg = void; export type GetPreferencesApiResponse = /** status 200 OK */ Preferences; @@ -225,6 +298,53 @@ export type GetPreferencesApiArg = { /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ pretty?: string; }; +export type ReplacePreferencesApiResponse = /** status 200 OK */ Preferences | /** status 201 Created */ Preferences; +export type ReplacePreferencesApiArg = { + /** name of the Preferences */ + name: string; + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */ + fieldManager?: string; + /** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. 
*/ + fieldValidation?: string; + preferences: Preferences; +}; +export type DeletePreferencesApiResponse = /** status 200 OK */ Status | /** status 202 Accepted */ Status; +export type DeletePreferencesApiArg = { + /** name of the Preferences */ + name: string; + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. */ + gracePeriodSeconds?: number; + /** if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it */ + ignoreStoreReadErrorWithClusterBreakingPotential?: boolean; + /** Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. */ + orphanDependents?: boolean; + /** Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. */ + propagationPolicy?: string; +}; +export type UpdatePreferencesApiResponse = /** status 200 OK */ Preferences | /** status 201 Created */ Preferences; +export type UpdatePreferencesApiArg = { + /** name of the Preferences */ + name: string; + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */ + fieldManager?: string; + /** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */ + fieldValidation?: string; + /** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */ + force?: boolean; + patch: Patch; +}; export type ListStarsApiResponse = /** status 200 OK */ StarsList; export type ListStarsApiArg = { /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ @@ -558,28 +678,6 @@ export type PreferencesSpec = { /** day of the week (sunday, monday, etc) */ weekStart?: string; }; -export type PreferencesstatusOperatorState = { - /** descriptiveState is an optional more descriptive state field which has no requirements on format */ - descriptiveState?: string; - /** details contains any extra information that is operator-specific */ - details?: { - [key: string]: object; - }; - /** lastEvaluation is the ResourceVersion last evaluated */ - lastEvaluation: string; - /** state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation. */ - state: string; -}; -export type PreferencesStatus = { - /** additionalFields is reserved for future use */ - additionalFields?: { - [key: string]: object; - }; - /** operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field. */ - operatorStates?: { - [key: string]: PreferencesstatusOperatorState; - }; -}; export type Preferences = { /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ apiVersion?: string; @@ -588,7 +686,6 @@ export type Preferences = { metadata: ObjectMeta; /** Spec is the spec of the Preferences */ spec: PreferencesSpec; - status: PreferencesStatus; }; export type ListMeta = { /** continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. 
The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. */ @@ -608,55 +705,6 @@ export type PreferencesList = { kind?: string; metadata: ListMeta; }; -export type StarsResource = { - group: string; - kind: string; - /** The set of resources */ - names: string[]; -}; -export type StarsSpec = { - resource: StarsResource[]; -}; -export type StarsstatusOperatorState = { - /** descriptiveState is an optional more descriptive state field which has no requirements on format */ - descriptiveState?: string; - /** details contains any extra information that is operator-specific */ - details?: { - [key: string]: object; - }; - /** lastEvaluation is the ResourceVersion last evaluated */ - lastEvaluation: string; - /** state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation. */ - state: string; -}; -export type StarsStatus = { - /** additionalFields is reserved for future use */ - additionalFields?: { - [key: string]: object; - }; - /** operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field. */ - operatorStates?: { - [key: string]: StarsstatusOperatorState; - }; -}; -export type Stars = { - /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; - /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata: ObjectMeta; - /** Spec is the spec of the Stars */ - spec: StarsSpec; - status: StarsStatus; -}; -export type StarsList = { - /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; - items: Stars[]; - /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata: ListMeta; -}; export type StatusCause = { /** The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional. 
@@ -702,11 +750,41 @@ export type Status = { status?: string; }; export type Patch = object; +export type StarsResource = { + group: string; + kind: string; + /** The set of resources */ + names: string[]; +}; +export type StarsSpec = { + resource: StarsResource[]; +}; +export type Stars = { + /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ + apiVersion?: string; + /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ + kind?: string; + metadata: ObjectMeta; + /** Spec is the spec of the Stars */ + spec: StarsSpec; +}; +export type StarsList = { + /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ + apiVersion?: string; + items: Stars[]; + /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ + kind?: string; + metadata: ListMeta; +}; export const { useGetApiResourcesQuery, useListPreferencesQuery, + useCreatePreferencesMutation, useMergedPreferencesQuery, useGetPreferencesQuery, + useReplacePreferencesMutation, + useDeletePreferencesMutation, + useUpdatePreferencesMutation, useListStarsQuery, useCreateStarsMutation, useDeletecollectionStarsMutation, diff --git a/public/app/features/browse-dashboards/components/FolderActionsButton.tsx b/public/app/features/browse-dashboards/components/FolderActionsButton.tsx index dd88bf2679d..ec093e3aa69 100644 --- a/public/app/features/browse-dashboards/components/FolderActionsButton.tsx +++ b/public/app/features/browse-dashboards/components/FolderActionsButton.tsx @@ -7,7 +7,9 @@ import { Button, Drawer, Dropdown, Icon, Menu, MenuItem } from '@grafana/ui'; import { Permissions } from 'app/core/components/AccessControl'; import { appEvents } from 'app/core/core'; import { RepoType } from 'app/features/provisioning/Wizard/types'; +import { BulkMoveProvisionedResource } from 'app/features/provisioning/components/BulkActions/BulkMoveProvisionedResource'; import { DeleteProvisionedFolderForm } from 'app/features/provisioning/components/Folders/DeleteProvisionedFolderForm'; +import { useIsProvisionedInstance } from 'app/features/provisioning/hooks/useIsProvisionedInstance'; import { getReadOnlyTooltipText } from 'app/features/provisioning/utils/repository'; import { ShowModalReactEvent } from 'app/types/events'; import { FolderDTO } from 'app/types/folders'; @@ -29,14 +31,18 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) const [isOpen, setIsOpen] = useState(false); const [showPermissionsDrawer, setShowPermissionsDrawer] = useState(false); const [showDeleteProvisionedFolderDrawer, setShowDeleteProvisionedFolderDrawer] = useState(false); + const 
[showMoveProvisionedFolderDrawer, setShowMoveProvisionedFolderDrawer] = useState(false); const [moveFolder] = useMoveFolderMutationFacade(); + const isProvisionedInstance = useIsProvisionedInstance(); const deleteFolder = useDeleteFolderMutationFacade(); const { canEditFolders, canDeleteFolders, canViewPermissions, canSetPermissions } = getFolderPermissions(folder); const isProvisionedFolder = folder.managedBy === ManagerKind.Repo; + // When it's a single provisioned folder, the root repository folder cannot be moved + const isProvisionedRootFolder = isProvisionedFolder && !isProvisionedInstance && folder.parentUid === undefined; // Can only move folders when the folder is not provisioned - const canMoveFolder = canEditFolders && !isProvisionedFolder; + const canMoveFolder = canEditFolders && !isProvisionedRootFolder; const onMove = async (destinationUID: string) => { await moveFolder({ folderUID: folder.uid, destinationUID: destinationUID }); @@ -115,6 +121,10 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) setShowDeleteProvisionedFolderDrawer(true); }; + const handleShowMoveProvisionedFolderDrawer = () => { + setShowMoveProvisionedFolderDrawer(true); + }; + const managePermissionsLabel = t('browse-dashboards.folder-actions-button.manage-permissions', 'Manage permissions'); const moveLabel = t('browse-dashboards.folder-actions-button.move', 'Move'); const deleteLabel = t('browse-dashboards.folder-actions-button.delete', 'Delete'); @@ -122,7 +132,12 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) const menu = ( {canViewPermissions && setShowPermissionsDrawer(true)} label={managePermissionsLabel} />} - {canMoveFolder && !isReadOnlyRepo && } + {canMoveFolder && !isReadOnlyRepo && ( + + )} {canDeleteFolders && !isReadOnlyRepo && ( )} + {showMoveProvisionedFolderDrawer && ( + setShowMoveProvisionedFolderDrawer(false)} + > + setShowMoveProvisionedFolderDrawer(false)} + /> + + )} ); } diff --git a/public/app/features/dashboard-scene/scene/DashboardScene.tsx b/public/app/features/dashboard-scene/scene/DashboardScene.tsx index be793d5f1d1..f918af82afa 100644 --- a/public/app/features/dashboard-scene/scene/DashboardScene.tsx +++ b/public/app/features/dashboard-scene/scene/DashboardScene.tsx @@ -589,13 +589,14 @@ export class DashboardScene extends SceneObjectBase impleme this.setState({ overlay: undefined }); } - public async onStarDashboard() { + public async onStarDashboard(isStarred?: boolean) { const { meta, uid } = this.state; + isStarred = isStarred ??
Boolean(meta.isStarred); if (!uid) { return; } try { - const result = await getDashboardSrv().starDashboard(uid, Boolean(meta.isStarred)); + const result = await getDashboardSrv().starDashboard(uid, isStarred); this.setState({ meta: { diff --git a/public/app/features/provisioning/Wizard/FinishStep.tsx b/public/app/features/provisioning/Wizard/FinishStep.tsx index 5414c1929e2..30a4749faaf 100644 --- a/public/app/features/provisioning/Wizard/FinishStep.tsx +++ b/public/app/features/provisioning/Wizard/FinishStep.tsx @@ -12,7 +12,12 @@ import { getGitProviderFields } from './fields'; import { WizardFormData } from './types'; export const FinishStep = memo(function FinishStep() { - const { register, watch, setValue } = useFormContext(); + const { + register, + watch, + setValue, + formState: { errors }, + } = useFormContext(); const settings = useGetFrontendSettingsQuery(); const [type, readOnly] = watch(['repository.type', 'repository.readOnly']); @@ -42,6 +47,8 @@ export const FinishStep = memo(function FinishStep() { 'How often to sync changes from the repository' )} required + error={errors?.repository?.sync?.intervalSeconds?.message} + invalid={!!errors?.repository?.sync?.intervalSeconds?.message} > (); - const repositoryStatusQuery = useGetRepositoryStatusQuery(repoName ? { name: repoName } : skipToken); + const [shouldEnablePolling, setShouldEnablePolling] = useState(true); + + const POLLING_INTERVAL_MS = 5000; + + const repositoryStatusQuery = useGetRepositoryStatusQuery(repoName ? { name: repoName } : skipToken, { + // Disable polling by setting interval to 0 when we should stop + pollingInterval: shouldEnablePolling ? POLLING_INTERVAL_MS : 0, + skipPollingIfUnfocused: true, + }); const { healthy: isRepositoryHealthy, @@ -45,9 +53,21 @@ export const SynchronizeStep = memo(function SynchronizeStep({ checked, } = repositoryStatusQuery?.data?.status?.health || {}; + // healthStatusNotReady: If the repository is not yet ready (e.g., initial setup), synchronization cannot be started. + // User can potentially fail at this step if they click too fast and repo is not ready. + const healthStatusNotReady = + isRepositoryHealthy === false && repositoryStatusQuery?.data?.status?.observedGeneration === 0; + + // Stop polling when repository becomes healthy + useEffect(() => { + if (!healthStatusNotReady) { + setShouldEnablePolling(false); + } + }, [healthStatusNotReady]); + const hasError = repositoryStatusQuery.isError; const isLoading = repositoryStatusQuery.isLoading || repositoryStatusQuery.isFetching; - const isButtonDisabled = hasError || (checked !== undefined && isRepositoryHealthy === false); + const isButtonDisabled = hasError || (checked !== undefined && isRepositoryHealthy === false) || healthStatusNotReady; const startSynchronization = async () => { const [history] = getValues(['migrate.history']); @@ -151,21 +171,43 @@ export const SynchronizeStep = memo(function SynchronizeStep({ )} - - {hasError || (checked !== undefined && isRepositoryHealthy === false) ? ( - - ) : ( - - )} - + {healthStatusNotReady ? ( + <> + + + Repository connecting, synchronize will be ready soon. + + + + + + + + + ) : ( + + {hasError || (checked !== undefined && isRepositoryHealthy === false) ? 
( + + ) : ( + + )} + + )} ); }); diff --git a/public/app/features/provisioning/utils/getFormErrors.ts b/public/app/features/provisioning/utils/getFormErrors.ts index 7adcca8063c..3d94975a602 100644 --- a/public/app/features/provisioning/utils/getFormErrors.ts +++ b/public/app/features/provisioning/utils/getFormErrors.ts @@ -3,7 +3,7 @@ import { ErrorDetails } from 'app/api/clients/provisioning/v0alpha1'; import { WizardFormData } from '../Wizard/types'; export type RepositoryField = keyof WizardFormData['repository']; -export type RepositoryFormPath = `repository.${RepositoryField}`; +export type RepositoryFormPath = `repository.${RepositoryField}` | `repository.sync.intervalSeconds`; export type FormErrorTuple = [RepositoryFormPath | null, { message: string } | null]; /** @@ -25,7 +25,13 @@ export const getFormErrors = (errors: ErrorDetails[]): FormErrorTuple => { 'bitbucket.url', 'git.branch', 'git.url', + 'sync.intervalSeconds', ]; + + const nestedFieldMap: Record = { + 'sync.intervalSeconds': 'repository.sync.intervalSeconds', + }; + const fieldMap: Record = { path: 'repository.path', branch: 'repository.branch', @@ -37,6 +43,12 @@ export const getFormErrors = (errors: ErrorDetails[]): FormErrorTuple => { if (error.field) { const cleanField = error.field.replace('spec.', ''); if (fieldsToValidate.includes(cleanField)) { + // Check for direct nested field mapping first + if (cleanField in nestedFieldMap) { + return [nestedFieldMap[cleanField], { message: error.detail || `Invalid ${cleanField}` }]; + } + + // Fall back to simple field mapping for non-nested fields const fieldParts = cleanField.split('.'); const lastPart = fieldParts[fieldParts.length - 1]; diff --git a/public/app/features/query/components/QueryEditorRow.test.tsx b/public/app/features/query/components/QueryEditorRow.test.tsx index e87dd718b95..f839e570819 100644 --- a/public/app/features/query/components/QueryEditorRow.test.tsx +++ b/public/app/features/query/components/QueryEditorRow.test.tsx @@ -4,6 +4,7 @@ import { PropsWithChildren } from 'react'; import { CoreApp, DataQueryRequest, dateTime, LoadingState, PanelData, toDataFrame } from '@grafana/data'; import { DataQuery } from '@grafana/schema'; import { mockDataSource } from 'app/features/alerting/unified/mocks'; +import { ExpressionDatasourceUID } from 'app/features/expressions/types'; import { filterPanelDataToQuery, Props, QueryEditorRow } from './QueryEditorRow'; @@ -464,5 +465,28 @@ describe('QueryEditorRow', () => { expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument(); }); }); + + it('should not render saved queries buttons when query is an expression query', async () => { + const expressionQuery = { + refId: 'B', + datasource: { + uid: ExpressionDatasourceUID, + type: '__expr__', + }, + }; + + const expressionProps = { + ...props(testData), + query: expressionQuery, + queries: [expressionQuery], + }; + + render(); + + await waitFor(() => { + expect(screen.queryByText('Save query')).not.toBeInTheDocument(); + expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument(); + }); + }); }); }); diff --git a/public/app/features/query/components/QueryEditorRow.tsx b/public/app/features/query/components/QueryEditorRow.tsx index fb469d17aa7..fe8fa6c018e 100644 --- a/public/app/features/query/components/QueryEditorRow.tsx +++ b/public/app/features/query/components/QueryEditorRow.tsx @@ -34,6 +34,7 @@ import { } from 'app/core/components/QueryOperationRow/QueryOperationRow'; import { useQueryLibraryContext } from 
'../../explore/QueryLibrary/QueryLibraryContext'; +import { ExpressionDatasourceUID } from '../../expressions/types'; import { QueryActionComponent, RowActionComponents } from './QueryActionComponent'; import { QueryEditorRowHeader } from './QueryEditorRowHeader'; @@ -386,10 +387,11 @@ export class QueryEditorRow extends PureComponent - {!isEditingQueryLibrary && !isUnifiedAlerting && ( + {!isEditingQueryLibrary && !isUnifiedAlerting && !isExpressionQuery && ( { + return getBackendSrv().get('api/user/stars'); + }); + + if (!starredUids || !uid) { + return null; + } + + const isStarred = starredUids?.includes(uid); return ( } data-testid={selectors.components.NavToolbar.markAsFavorite} - onClick={() => { + onClick={async () => { DashboardInteractions.toolbarFavoritesClick(); - dashboard.onStarDashboard(); + await dashboard.onStarDashboard(isStarred); + retry(); }} /> ); diff --git a/public/app/features/teams/TeamList.tsx b/public/app/features/teams/TeamList.tsx index b1a5952eca8..f08bcbe2658 100644 --- a/public/app/features/teams/TeamList.tsx +++ b/public/app/features/teams/TeamList.tsx @@ -5,6 +5,7 @@ import { connect, ConnectedProps } from 'react-redux'; import { GrafanaTheme2 } from '@grafana/data'; import { Trans, t } from '@grafana/i18n'; +import { config, getBackendSrv } from '@grafana/runtime'; import { Avatar, CellProps, @@ -65,6 +66,7 @@ export const TeamList = ({ changeSort, }: Props) => { const [roleOptions, setRoleOptions] = useState([]); + const [scimGroupSyncEnabled, setScimGroupSyncEnabled] = useState(false); const styles = useStyles2(getStyles); useEffect(() => { @@ -77,6 +79,25 @@ export const TeamList = ({ } }, []); + useEffect(() => { + const checkSCIMSettings = async () => { + if (!config.featureToggles.enableSCIM) { + setScimGroupSyncEnabled(false); + return; + } + try { + const scimSettings = await getBackendSrv().get( + `/apis/scim.grafana.app/v0alpha1/namespaces/${config.namespace}/config` + ); + setScimGroupSyncEnabled(scimSettings?.items[0]?.spec?.enableGroupSync || false); + } catch { + setScimGroupSyncEnabled(false); + } + }; + + checkSCIMSettings(); + }, []); + const canCreate = contextSrv.hasPermission(AccessControlAction.ActionTeamsCreate); const displayRolePicker = shouldDisplayRolePicker(); @@ -198,7 +219,7 @@ export const TeamList = ({ const canReadTeam = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsRead, original); const canDelete = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsDelete, original) && - !original.isProvisioned; + (!scimGroupSyncEnabled || !original.isProvisioned); return ( {canReadTeam && ( @@ -226,7 +247,7 @@ export const TeamList = ({ }, }, ], - [displayRolePicker, hasFetched, rolesLoading, roleOptions, deleteTeam, styles] + [displayRolePicker, hasFetched, rolesLoading, roleOptions, deleteTeam, styles, scimGroupSyncEnabled] ); return ( diff --git a/public/app/features/transformers/docs/content.ts b/public/app/features/transformers/docs/content.ts index 72726236b52..b41b14b57c8 100644 --- a/public/app/features/transformers/docs/content.ts +++ b/public/app/features/transformers/docs/content.ts @@ -369,24 +369,28 @@ This transformation is very useful if your data source does not natively filter The available conditions for all fields are: -- **Regex** - Match a regex expression. - **Is Null** - Match if the value is null. - **Is Not Null** - Match if the value is not null. - **Equal** - Match if the value is equal to the specified value. 
-- **Different** - Match if the value is different than the specified value. +- **Not Equal** - Match if the value is not equal to the specified value. +- **Regex** - Match a regex expression. The available conditions for string fields are: - **Contains substring** - Match if the value contains the specified substring (case insensitive). - **Does not contain substring** - Match if the value doesn't contain the specified substring (case insensitive). -The available conditions for number and time fields are: +The available conditions for number fields are: - **Greater** - Match if the value is greater than the specified value. - **Lower** - Match if the value is lower than the specified value. - **Greater or equal** - Match if the value is greater or equal. - **Lower or equal** - Match if the value is lower or equal. -- **Range** - Match a range between a specified minimum and maximum, min and max included. A time field will pre-populate with variables to filter by selected time. +- **In between** - Match a range between a specified minimum and maximum, min and max included. + +The available conditions for time fields are: + +- **In between** - Match a range between a specified minimum and maximum. The min and max values will pre-populate with variables to filter by selected time. Consider the following dataset: diff --git a/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json b/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json index 0d9105711ae..e5b7e6896ec 100644 --- a/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json +++ b/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json @@ -2,6 +2,7 @@ "type": "datasource", "name": "PostgreSQL", "id": "grafana-postgresql-datasource", + "executable": "gpx_grafana-postgresql-datasource", "aliasIDs": ["postgres"], "category": "sql", @@ -21,6 +22,9 @@ { "name": "Documentation", "url": "https://grafana.com/docs/grafana/latest/datasources/postgres/" } ] }, + "dependencies": { + "grafanaDependency": ">=11.6.0" + }, "alerting": true, "annotations": true, diff --git a/public/app/plugins/datasource/graphite/datasource.ts b/public/app/plugins/datasource/graphite/datasource.ts index 0a90bc776b7..dd6915c0aaa 100644 --- a/public/app/plugins/datasource/graphite/datasource.ts +++ b/public/app/plugins/datasource/graphite/datasource.ts @@ -1029,9 +1029,15 @@ export class GraphiteDatasource }; if (config.featureToggles.graphiteBackendMode) { - const functions = await this.getResource('functions'); - this.funcDefs = gfunc.parseFuncDefs(functions); - return this.funcDefs; + try { + const functions = await this.getResource('functions'); + this.funcDefs = gfunc.parseFuncDefs(functions); + return this.funcDefs; + } catch (error) { + console.error('Fetching graphite functions error', error); + this.funcDefs = gfunc.getFuncDefs(this.graphiteVersion); + return this.funcDefs; + } } return lastValueFrom( diff --git a/public/app/plugins/datasource/influxdb/datasource.test.ts b/public/app/plugins/datasource/influxdb/datasource.test.ts index 12ae898d58d..ec81ad57f1d 100644 --- a/public/app/plugins/datasource/influxdb/datasource.test.ts +++ b/public/app/plugins/datasource/influxdb/datasource.test.ts @@ -273,6 +273,15 @@ describe('interpolateQueryExpr', () => { replace: jest.fn().mockImplementation((...rest: unknown[]) => 'templateVarReplaced'), } as unknown as TemplateSrv; let ds = getMockInfluxDS(getMockDSInstanceSettings(), templateSrvStub); + + // Mock console.warn as we expect tests to use it + 
beforeEach(() => { + jest.spyOn(console, 'warn').mockImplementation(); + }); + afterEach(() => { + jest.restoreAllMocks(); + }); + it('should return the value as it is', () => { const value = 'normalValue'; const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build(); @@ -281,6 +290,18 @@ describe('interpolateQueryExpr', () => { expect(result).toBe(expectation); }); + it('should return the escaped value if the value wrapped in regex without !~ or =~', () => { + const value = '/special/path'; + const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build(); + const result = ds.interpolateQueryExpr( + value, + variableMock, + 'select atan(z/sqrt(3.14)), that where path /$tempVar/' + ); + const expectation = `\\/special\\/path`; + expect(result).toBe(expectation); + }); + it('should return the escaped value if the value wrapped in regex', () => { const value = '/special/path'; const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build(); diff --git a/public/app/plugins/datasource/influxdb/datasource.ts b/public/app/plugins/datasource/influxdb/datasource.ts index b073f333fcd..9fb42a6f236 100644 --- a/public/app/plugins/datasource/influxdb/datasource.ts +++ b/public/app/plugins/datasource/influxdb/datasource.ts @@ -360,10 +360,7 @@ export default class InfluxDatasource extends DataSourceWithBackend escapeRegex(v)).join('|')})`; + // If the value is a string array first escape them then join them with pipe + // then put inside parenthesis. + return typeof value === 'string' ? escapeRegex(value) : `(${value.map((v) => escapeRegex(v)).join('|')})`; + } catch (e) { + console.warn(`Supplied match is not valid regex: ${match}`); + } } return value; diff --git a/public/app/plugins/panel/text/TextPanelEditor.tsx b/public/app/plugins/panel/text/TextPanelEditor.tsx index 75d8f50810e..78bc1fc1600 100644 --- a/public/app/plugins/panel/text/TextPanelEditor.tsx +++ b/public/app/plugins/panel/text/TextPanelEditor.tsx @@ -1,6 +1,5 @@ import { css, cx } from '@emotion/css'; import { useMemo } from 'react'; -import AutoSizer from 'react-virtualized-auto-sizer'; import { GrafanaTheme2, StandardEditorProps } from '@grafana/data'; import { @@ -25,26 +24,17 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp return (
- - {({ width }) => { - if (width === 0) { - return null; - } - return ( - - ); - }} - +
); }; @@ -52,8 +42,6 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp const getStyles = (theme: GrafanaTheme2) => ({ editorBox: css({ label: 'editorBox', - border: `1px solid ${theme.colors.border.medium}`, - borderRadius: theme.shape.radius.default, margin: theme.spacing(0.5, 0), width: '100%', }), diff --git a/public/locales/cs-CZ/grafana.json b/public/locales/cs-CZ/grafana.json index fca006c3f6a..7a4a9c5fb99 100644 --- a/public/locales/cs-CZ/grafana.json +++ b/public/locales/cs-CZ/grafana.json @@ -10228,7 +10228,7 @@ "title": "Zdroje dat" }, "databases": { - "title": "Databáze" + "title": "" }, "datasources": { "subtitle": "Přidávejte a konfigurujte zdroje dat", @@ -11772,6 +11772,8 @@ "button-cancelling": "", "button-next": "Dokončit", "button-start": "Zahájit synchronizaci", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Tímto odstraníte konfiguraci úložiště a přijdete o veškerý pokrok. Opravdu chcete zahodit změny?", "confirm": "Ano, zahodit", diff --git a/public/locales/de-DE/grafana.json b/public/locales/de-DE/grafana.json index 866559073c6..bb98fc3edcc 100644 --- a/public/locales/de-DE/grafana.json +++ b/public/locales/de-DE/grafana.json @@ -10166,7 +10166,7 @@ "title": "Datenquellen" }, "databases": { - "title": "Datenbanken" + "title": "" }, "datasources": { "subtitle": "Füge Datenquellen hinzu und konfiguriere sie", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Fertigstellen", "button-start": "Synchronisierung starten", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Dadurch wird die Repository-Konfiguration gelöscht und Sie verlieren alle Fortschritte. Sind Sie sicher, dass Sie Ihre Änderungen verwerfen möchten?", "confirm": "Ja, verwerfen", diff --git a/public/locales/en-US/grafana.json b/public/locales/en-US/grafana.json index 01dc3cacaa4..364cb079f3d 100644 --- a/public/locales/en-US/grafana.json +++ b/public/locales/en-US/grafana.json @@ -3496,6 +3496,7 @@ "move-modal-field-label": "Folder name", "move-modal-text": "This action will move the following content:", "move-modal-title": "Move", + "move-provisioned-folder": "Move provisioned folder", "moving": "Moving...", "new-folder-name-required-phrase": "Folder name is required.", "selected-mix-resources-modal-text": "You have selected both provisioned and non-provisioned resources. These cannot be processed together. Please select only provisioned resources or only non-provisioned resources and try again.", @@ -11707,6 +11708,8 @@ "button-cancelling": "Cancelling...", "button-next": "Finish", "button-start": "Begin synchronization", + "check-status-button": "Check repository status", + "check-status-message": "Repository connecting, synchronize will be ready soon.", "discard-modal": { "body": "This will delete the repository configuration and you will lose all progress. 
Are you sure you want to discard your changes?", "confirm": "Yes, discard", diff --git a/public/locales/es-ES/grafana.json b/public/locales/es-ES/grafana.json index 8ca59f74551..763065d33c1 100644 --- a/public/locales/es-ES/grafana.json +++ b/public/locales/es-ES/grafana.json @@ -10166,7 +10166,7 @@ "title": "Orígenes de datos" }, "databases": { - "title": "Bases de datos" + "title": "" }, "datasources": { "subtitle": "Añadir y configurar orígenes de datos", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Terminar", "button-start": "Iniciar la sincronización", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Esto eliminará la configuración del repositorio y se perderá todo el progreso. ¿Seguro que quieres descartar los cambios?", "confirm": "Sí, descartar", diff --git a/public/locales/fr-FR/grafana.json b/public/locales/fr-FR/grafana.json index 5ae2ac4923d..c99717bb9c5 100644 --- a/public/locales/fr-FR/grafana.json +++ b/public/locales/fr-FR/grafana.json @@ -10166,7 +10166,7 @@ "title": "Sources de données" }, "databases": { - "title": "Bases de données" + "title": "" }, "datasources": { "subtitle": "Ajouter et configurer des sources de données", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Terminer", "button-start": "Commencer la synchronisation", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Cette action supprimera la configuration du dépôt et vous perdrez toutes vos modifications. Voulez-vous vraiment annuler vos modifications ?", "confirm": "Oui, annuler", diff --git a/public/locales/hu-HU/grafana.json b/public/locales/hu-HU/grafana.json index 20bc7febf35..258bdc1e86e 100644 --- a/public/locales/hu-HU/grafana.json +++ b/public/locales/hu-HU/grafana.json @@ -10166,7 +10166,7 @@ "title": "Adatforrások" }, "databases": { - "title": "Adatbázisok" + "title": "" }, "datasources": { "subtitle": "Adatforrások hozzáadása és konfigurálása", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Befejezés", "button-start": "Szinkronizálás indítása", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Ez törli az adattár konfigurációját, és az összes előrehaladás el fog veszni. Biztosan elveti a módosításokat?", "confirm": "Igen, elvetés", diff --git a/public/locales/id-ID/grafana.json b/public/locales/id-ID/grafana.json index 12b4e9be979..73455bde660 100644 --- a/public/locales/id-ID/grafana.json +++ b/public/locales/id-ID/grafana.json @@ -10135,7 +10135,7 @@ "title": "Sumber data" }, "databases": { - "title": "Database" + "title": "" }, "datasources": { "subtitle": "Tambahkan dan konfigurasikan sumber data", @@ -11652,6 +11652,8 @@ "button-cancelling": "", "button-next": "Selesai", "button-start": "Mulai sinkronisasi", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Ini akan menghapus konfigurasi repositori dan Anda akan kehilangan semua kemajuan. 
Yakin ingin membuang perubahan Anda?", "confirm": "Ya, buang", diff --git a/public/locales/it-IT/grafana.json b/public/locales/it-IT/grafana.json index b0c4a37757a..d70def019ff 100644 --- a/public/locales/it-IT/grafana.json +++ b/public/locales/it-IT/grafana.json @@ -10166,7 +10166,7 @@ "title": "Fonti dei dati" }, "databases": { - "title": "Database" + "title": "" }, "datasources": { "subtitle": "Aggiungi e configura origini dati", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Fine", "button-start": "Inizia la sincronizzazione", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Questa operazione eliminerà la configurazione del repository e perderai tutti i progressi. Vuoi davvero eliminare le modifiche?", "confirm": "Sì, elimina", diff --git a/public/locales/ja-JP/grafana.json b/public/locales/ja-JP/grafana.json index 26aab882838..4d639e10f2d 100644 --- a/public/locales/ja-JP/grafana.json +++ b/public/locales/ja-JP/grafana.json @@ -10135,7 +10135,7 @@ "title": "データソース" }, "databases": { - "title": "データベース" + "title": "" }, "datasources": { "subtitle": "データソースを追加および構成する", @@ -11652,6 +11652,8 @@ "button-cancelling": "", "button-next": "完了", "button-start": "同期を開始", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "この操作によりリポジトリの設定が削除され、すべての変更内容が失われます。本当に破棄してもよろしいですか?", "confirm": "はい、破棄します", diff --git a/public/locales/ko-KR/grafana.json b/public/locales/ko-KR/grafana.json index 9bda7bad46a..990f9805176 100644 --- a/public/locales/ko-KR/grafana.json +++ b/public/locales/ko-KR/grafana.json @@ -10135,7 +10135,7 @@ "title": "데이터 소스" }, "databases": { - "title": "데이터베이스" + "title": "" }, "datasources": { "subtitle": "데이터 소스 추가 및 구성", @@ -11652,6 +11652,8 @@ "button-cancelling": "", "button-next": "완료", "button-start": "동기화 시작", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "이렇게 하면 리포지토리 구성이 삭제되고 지금까지의 작업 내용이 모두 사라집니다. 정말 변경 사항을 취소하시겠어요?", "confirm": "네, 취소해 주세요", diff --git a/public/locales/nl-NL/grafana.json b/public/locales/nl-NL/grafana.json index 0a8f8a1cd5b..259e3cccb50 100644 --- a/public/locales/nl-NL/grafana.json +++ b/public/locales/nl-NL/grafana.json @@ -10166,7 +10166,7 @@ "title": "Gegevensbronnen" }, "databases": { - "title": "Databases" + "title": "" }, "datasources": { "subtitle": "Gegevensbronnen toevoegen en configureren", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Voltooien", "button-start": "Synchronisatie starten", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Hiermee wordt de repository-configuratie verwijderd en verlies je alle voortgang. Weet je zeker dat je je wijzigingen wilt negeren?", "confirm": "Ja, verwijderen", diff --git a/public/locales/pl-PL/grafana.json b/public/locales/pl-PL/grafana.json index 2bd9b8a504c..ad752de6f02 100644 --- a/public/locales/pl-PL/grafana.json +++ b/public/locales/pl-PL/grafana.json @@ -10228,7 +10228,7 @@ "title": "Źródła danych" }, "databases": { - "title": "Bazy danych" + "title": "" }, "datasources": { "subtitle": "Dodaj i skonfiguruj źródła danych", @@ -11772,6 +11772,8 @@ "button-cancelling": "", "button-next": "Zakończ", "button-start": "Rozpocznij synchronizację", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Spowoduje to usunięcie konfiguracji repozytorium i utracisz wszystkie postępy. 
Czy na pewno chcesz odrzucić zmiany?", "confirm": "Tak, odrzuć", diff --git a/public/locales/pt-BR/grafana.json b/public/locales/pt-BR/grafana.json index a7fe703c13c..9af03ad58fb 100644 --- a/public/locales/pt-BR/grafana.json +++ b/public/locales/pt-BR/grafana.json @@ -10166,7 +10166,7 @@ "title": "Fontes de dados" }, "databases": { - "title": "Bancos de dados" + "title": "" }, "datasources": { "subtitle": "Adicione e configure fontes de dados", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Finalizar", "button-start": "Iniciar sincronização", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Isso excluirá a configuração do repositório e você perderá todo o progresso. Tem certeza de que deseja descartar suas alterações?", "confirm": "Sim, descartar", diff --git a/public/locales/pt-PT/grafana.json b/public/locales/pt-PT/grafana.json index 4c6e5968eba..7d58847a6f5 100644 --- a/public/locales/pt-PT/grafana.json +++ b/public/locales/pt-PT/grafana.json @@ -10166,7 +10166,7 @@ "title": "Origens de dados" }, "databases": { - "title": "Bases de dados" + "title": "" }, "datasources": { "subtitle": "Adicionar e configurar origens de dados", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Concluir", "button-start": "Iniciar sincronização", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Isto eliminará a configuração do repositório e todo o progresso será perdido. Tem a certeza de que pretende rejeitar as suas alterações?", "confirm": "Sim, rejeitar", diff --git a/public/locales/ru-RU/grafana.json b/public/locales/ru-RU/grafana.json index 3ce0cf27913..c6dbc108ff9 100644 --- a/public/locales/ru-RU/grafana.json +++ b/public/locales/ru-RU/grafana.json @@ -10228,7 +10228,7 @@ "title": "Источники данных" }, "databases": { - "title": "Базы данных" + "title": "" }, "datasources": { "subtitle": "Добавление и настройка источников данных", @@ -11772,6 +11772,8 @@ "button-cancelling": "", "button-next": "Готово", "button-start": "Начать синхронизацию", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Конфигурация репозитория будет удалена, и вы потеряете весь прогресс. Действительно отменить изменения?", "confirm": "Да, отменить", diff --git a/public/locales/sv-SE/grafana.json b/public/locales/sv-SE/grafana.json index f28eee1ea6a..840a0d68dc4 100644 --- a/public/locales/sv-SE/grafana.json +++ b/public/locales/sv-SE/grafana.json @@ -10166,7 +10166,7 @@ "title": "Datakällor" }, "databases": { - "title": "Databaser" + "title": "" }, "datasources": { "subtitle": "Lägg till och konfigurera datakällor", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Slutför", "button-start": "Påbörja synkronisering", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Detta kommer att radera lagringsplatskonfigurationen och du kommer att förlora alla ändringar. 
Är du säker på att du vill kassera dina ändringar?", "confirm": "Ja, kassera", diff --git a/public/locales/tr-TR/grafana.json b/public/locales/tr-TR/grafana.json index 92e8f71dde5..884dc555c5f 100644 --- a/public/locales/tr-TR/grafana.json +++ b/public/locales/tr-TR/grafana.json @@ -10166,7 +10166,7 @@ "title": "Veri kaynakları" }, "databases": { - "title": "Veri tabanları " + "title": "" }, "datasources": { "subtitle": "Veri kaynakları ekleyin ve yapılandırın", @@ -11692,6 +11692,8 @@ "button-cancelling": "", "button-next": "Sonlandır", "button-start": "Senkronizasyonu başlat", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Bu işlem, depo yapılandırmasını silecek ve tüm ilerlemeyi kaybedeceksiniz. Değişikliklerinizi silmek istediğinizden emin misiniz?", "confirm": "Evet, yoksay", diff --git a/public/locales/zh-Hans/grafana.json b/public/locales/zh-Hans/grafana.json index 03dc6510da7..68d83a91518 100644 --- a/public/locales/zh-Hans/grafana.json +++ b/public/locales/zh-Hans/grafana.json @@ -10135,7 +10135,7 @@ "title": "数据源" }, "databases": { - "title": "数据库" + "title": "" }, "datasources": { "subtitle": "添加并配置数据源", @@ -11652,6 +11652,8 @@ "button-cancelling": "", "button-next": "完成", "button-start": "开始同步", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "此操作将删除存储库配置,您将丢失所有进度。您确定要丢弃更改吗?", "confirm": "是,丢弃", diff --git a/public/locales/zh-Hant/grafana.json b/public/locales/zh-Hant/grafana.json index fa5cf2e589b..fc24819d7ec 100644 --- a/public/locales/zh-Hant/grafana.json +++ b/public/locales/zh-Hant/grafana.json @@ -10135,7 +10135,7 @@ "title": "資料來源" }, "databases": { - "title": "資料庫" + "title": "" }, "datasources": { "subtitle": "新增並設定資料來源", @@ -11652,6 +11652,8 @@ "button-cancelling": "", "button-next": "結束", "button-start": "開始同步處理", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "這將刪除儲存庫設定,您將失去所有進度。確定要捨棄變更嗎?", "confirm": "是的,捨棄",