diff --git a/.air.toml b/.air.toml index dd0faa01f23..2959e88838e 100644 --- a/.air.toml +++ b/.air.toml @@ -1,13 +1,13 @@ [build] -bin = "./bin/grafana" +bin = "./bin/grafana-air" args_bin = ["server", "-profile", "-profile-addr=127.0.0.1", "-profile-port=6000", "-profile-block-rate=1", "-profile-mutex-rate=5", "-packaging=dev", "cfg:app_mode=development"] -cmd = "make GO_BUILD_DEV=1 build-backend" +cmd = "make GO_BUILD_DEV=1 build-air" exclude_regex = ["_test.go", "_gen.go"] exclude_unchanged = true follow_symlink = true -include_dir = ["apps", "conf", "devenv/dev-dashboards", "pkg", "public/views"] +include_dir = ["apps", "conf", "pkg", "public/views"] include_ext = ["go", "ini", "toml", "html", "json"] -stop_on_error = false +stop_on_error = true send_interrupt = true kill_delay = 500 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 85db1f8d4c7..bf54aeed6b4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -32,7 +32,9 @@ /devenv/README.md @grafana/docs-grafana # START Technical documentation -/.vale.ini @grafana/docs-tooling +/.vale.ini @grafana/docs-tooling +/AGENTS.md @grafana/docs-tooling + # `make docs` procedure and related workflows are owned @grafana/docs-tooling. Slack #docs. 
/docs/ @grafana/docs-tooling @@ -1279,6 +1281,7 @@ embed.go @grafana/grafana-as-code /.github/license_finder.yaml @bergquist /.github/actionlint.yaml @grafana/grafana-developer-enablement-squad /.github/workflows/pr-test-docker.yml @grafana/grafana-developer-enablement-squad +/.github/workflows/update-schema-types.yml @grafana/plugins-platform-frontend # Generated files not requiring owner approval /packages/grafana-data/src/types/featureToggles.gen.ts @grafanabot diff --git a/.github/workflows/feature-toggles-ci.yml b/.github/workflows/feature-toggles-ci.yml index 67d1d37c739..d8808d86d67 100644 --- a/.github/workflows/feature-toggles-ci.yml +++ b/.github/workflows/feature-toggles-ci.yml @@ -11,6 +11,8 @@ permissions: {} jobs: test: + name: Feature toggles documentation is in sync with source + runs-on: ubuntu-latest permissions: diff --git a/.github/workflows/pr-e2e-tests.yml b/.github/workflows/pr-e2e-tests.yml index 6a61f3bf5ca..6ade5a9c787 100644 --- a/.github/workflows/pr-e2e-tests.yml +++ b/.github/workflows/pr-e2e-tests.yml @@ -149,19 +149,11 @@ jobs: needs: - build-grafana steps: - - id: vault-secrets - uses: grafana/shared-workflows/actions/get-vault-secrets@main + - id: get-github-token + name: "create github app token" + uses: grafana/shared-workflows/actions/create-github-app-token@eb02241ed0a92aff205feab8ac3afcdf51c757c8 # create-github-app-token-v0.2.0 with: - repo_secrets: | - GRAFANA_DELIVERY_BOT_APP_PEM=delivery-bot-app:PRIVATE_KEY - - name: Generate token - id: generate_token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a - with: - app_id: ${{ vars.DELIVERY_BOT_APP_ID }} - private_key: ${{ env.GRAFANA_DELIVERY_BOT_APP_PEM }} - repositories: '["grafana"]' - permissions: '{"checks": "write"}' + github_app: "delivery-bot-app" - uses: grafana/shared-workflows/actions/login-to-gar@main id: login-to-gar with: @@ -184,7 +176,7 @@ jobs: echo "IMAGE=${DOCKER_IMAGE}" >> "$GITHUB_ENV" - name: Add PR status check env: - GH_TOKEN: 
${{ steps.generate_token.outputs.token }} + GH_TOKEN: ${{ steps.get-github-token.outputs.token }} SHA: ${{ github.event.pull_request.head.sha }} run: | gh api \ diff --git a/.github/workflows/release-npm.yml b/.github/workflows/release-npm.yml index e4bed54a7f8..4dd92e9d0a1 100644 --- a/.github/workflows/release-npm.yml +++ b/.github/workflows/release-npm.yml @@ -39,12 +39,14 @@ permissions: {} jobs: # If called with version_type 'canary' or 'stable', build + publish to NPM - # If called with version_type 'nightly', just tag the given version with nightly tag. It was already published by the canary build. + # If called with version_type 'nightly', do nothing (we're not yet tagging them with the nightly tag) publish: name: Publish NPM packages runs-on: github-hosted-ubuntu-x64-small if: inputs.version_type == 'canary' || inputs.version_type == 'stable' + # Required for this workflow to have permission to publish NPM packages + environment: npm-publish permissions: contents: read id-token: write @@ -130,18 +132,3 @@ jobs: env: NPM_TAG: ${{ steps.npm-tag.outputs.NPM_TAG }} run: ./scripts/publish-npm-packages.sh --dist-tag "$NPM_TAG" --registry 'https://registry.npmjs.org/' - - # TODO: finish this step - tag-nightly: - name: Tag nightly release - runs-on: github-hosted-ubuntu-x64-small - if: inputs.version_type == 'nightly' - - steps: - - name: Checkout workflow ref - uses: actions/checkout@v4 - with: - persist-credentials: false - - # TODO: tag the given release with nightly - diff --git a/.github/workflows/storybook-a11y.yml b/.github/workflows/storybook-a11y.yml index 9a36f7aefdf..68ff4e75e37 100644 --- a/.github/workflows/storybook-a11y.yml +++ b/.github/workflows/storybook-a11y.yml @@ -39,9 +39,10 @@ jobs: - uses: actions/checkout@v5 with: persist-credentials: false - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v5 with: node-version-file: '.nvmrc' + package-manager-cache: false # too large for GH's cache limits :-( - run: yarn install --immutable 
--check-cache - name: Install Playwright browsers run: npx playwright install --with-deps @@ -68,9 +69,10 @@ jobs: - uses: actions/checkout@v5 with: persist-credentials: false - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v5 with: node-version-file: '.nvmrc' + package-manager-cache: false # too large for GH's cache limits :-( - run: yarn install --immutable --check-cache - name: Install Playwright browsers run: npx playwright install --with-deps diff --git a/.github/workflows/update-schema-types.yml b/.github/workflows/update-schema-types.yml new file mode 100644 index 00000000000..a079f1d313e --- /dev/null +++ b/.github/workflows/update-schema-types.yml @@ -0,0 +1,22 @@ +name: Update Schema Types + +on: + push: + branches: + - main + paths: + - docs/sources/developers/plugins/plugin.schema.json + workflow_dispatch: + +# These permissions are needed to assume roles from Github's OIDC. +permissions: + contents: read + id-token: write + +jobs: + bundle-schema-types: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: grafana/plugin-actions/bundle-schema-types@main diff --git a/.github/workflows/verify-kinds.yml b/.github/workflows/verify-kinds.yml index 6319baa51a9..0fe82c5e67e 100644 --- a/.github/workflows/verify-kinds.yml +++ b/.github/workflows/verify-kinds.yml @@ -6,9 +6,13 @@ on: paths: - '**/*.cue' +permissions: {} + jobs: main: runs-on: "ubuntu-latest" + permissions: + contents: read # clone repository steps: - name: "Checkout Grafana repo" uses: "actions/checkout@v5" diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000000..5d12c20603b --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,297 @@ +# AGENTS.md + + + + + +## Documentation + +Instructions for documentation authoring in Markdown files. + +DOCS.md contains all the Docs AI toolkit docs in one file. + +## Role + +Act as an experienced software engineer and technical writer for Grafana Labs. 
+ +Write for software developers and engineers who understand general programming concepts. + +Focus on practical implementation and clear problem-solving guidance. + +### Grafana + +Use full product names on first mention, then short names: + +- Grafana Alloy (full), Alloy (short) +- Grafana Beyla (full), Beyla (short) + +Use "OpenTelemetry Collector" on first mention, then "Collector" for subsequent references. +Keep full name for distributions, headings, and links. + +Always use "Grafana Cloud" in full. + +Use complete terms: + +- "OpenTelemetry" (not "OTel") +- "Kubernetes" (not "K8s") + +Present observability signals in order: metrics, logs, traces, and profiles. + +Focus content on Grafana solutions when discussing integrations or migrations. + +## Style + +### Structure + +Structure articles into sections with headings. + +Leave Markdown front matter content between two triple dashes `---`. + +The front matter YAML `title` and the content h1 (#) heading should be the same. +Make sure there's an h1 heading in the content; this redundancy is required. + +Always include copy after a heading or between headings, for example: + +```markdown +## Heading + +Immediately followed by copy and not another heading. + +## Sub heading +``` + +The immediate copy after a heading should introduce and provide an overview of what's covered in the section. + +Start articles with an introduction that covers the goal of the article. Example goals: + +- Learn concepts +- Set up or install something +- Configure something +- Use a product to solve a business problem +- Troubleshoot a problem +- Integrate with other software or systems +- Migrate from one thing to another +- Refer to APIs or reference documentation + +Follow the goal with a list of prerequisites, for example: + +```markdown +Before you begin, ensure you have the following: + +- +- +- ... 
+``` + +Suggest and link to next steps and related resources at the end of the article, for example: + +- Learn more about A, B, C +- Configure X +- Use X to achieve Y +- Use X to achieve Z +- Project homepage or documentation +- Project repository (for example, GitHub, GitLab) +- Project package (for example, pip or NPM) + +You don't need to use the "Refer to..." syntax for next steps; use the link text directly. + +### Copy + +Write simple, direct copy with short sentences and paragraphs. + +Use contractions: + +- it's, isn't, that's, you're, don't + +Choose simple words: + +- use (not utilize) +- help (not assist) +- show (not demonstrate) + +Write with verbs and nouns. Use minimal adjectives except when describing Grafana Labs products. + +## Tense + +Write in present simple tense. + +Avoid present continuous tense. + +Only write in future tense to show future actions. + +### Voice + +Always write in an active voice. + +Change passive voice to active voice. + +### Perspective + +Address users as "you". + +Use second person perspective consistently. + +### Wordlist + +Use allowlist/blocklist instead of whitelist/blacklist. + +Use primary/secondary instead of master/slave. + +Use "refer to" instead of "see", "consult", "check out", and other phrases. + +### Formatting + +Use sentence case for titles and headings. + +Use inline Markdown links: [Link text](https://example.com). + +Link to other sections using descriptive phrases that include the section name: +"For setup details, refer to the [Lists](#lists) section." + +Bold text with two asterisks: **bold** + +Emphasize text with one underscore: _italics_ + +Format UI elements using sentence case as they appear: + +- Click **Submit**. +- Navigate to **User settings**. +- Configure **Alerting rules**. + +### Lists + +Write complete sentences for lists: + +- Works with all languages and frameworks (correct) +- All languages and frameworks (incorrect) + +Use dashes for unordered lists. 
+ +Bold keywords at list start and follow with a colon. + +### Images + +Include descriptive alt text that conveys the essential information or purpose. + +Write alt text without "Image of..." or "Picture of..." prefixes. + +### Code + +Use single code backticks for: + +- user input +- placeholders in markdown, for example _``_ +- files and directories, for example `/opt/file.md` +- source code keywords and identifiers, + for example variables, function and class names +- configuration options and values, for example `PORT` and `80` +- status codes, for example `404` + +Use triple code backticks followed by the syntax for code blocks, for example: + +```javascript +console.log('Hello World!'); +``` + +Introduce each code block with a short description. +End the introduction with a colon if the code sample follows it, for example: + +```markdown +The code sample outputs "Hello World!" to the browser console: + + +``` + +Use descriptive placeholder names in code samples. +Use uppercase letters with underscores to separate words in placeholders, +for example: + +```sh +OTEL_RESOURCE_ATTRIBUTES="service.name= +OTEL_EXPORTER_OTLP_ENDPOINT= +``` + +The placeholder includes the name and the less than and greater than symbols, +for example . + +If the placeholder is markdown emphasize it with underscores, +for example _``_. + +In code blocks use the placeholder without additional backticks or emphasis, +for example . + +Provide an explanation for each placeholder, +typically in the text following the code block or in a configuration section. + +Follow code samples with an explanation +and configuration options for placeholders, for example: + +```markdown + + +This code sets required environment variables +to send OTLP data to an OTLP endpoint. +To configure the code refer to the configuration section. + + +``` + +Put configuration for a code block after the code block. 
+ +## APIs + +When documenting API endpoints specify the HTTP method, +for example `GET`, `POST`, `PUT`, `DELETE`. + +Provide the full request path, using backticks. + +Use backticks for parameter names and example values. + +Use placeholders like `{userId}` for path parameters, for example: + +- To retrieve user details, make a `GET` request to `/api/v1/users/{userId}`. + +### CLI commands + +When presenting CLI commands and their output, +introduce the command with a brief explanation of its purpose. +Clearly distinguish the command from its output. + +For commands, use `sh` to specify the code block language. + +For output, use a generic specifier like `text`, `console`, +or `json`/`yaml` if the output is structured. + +For example: + +```markdown +To list all running pods in the `default` namespace, use the following command: + + +``` + +The output will resemble the following: + +```text +NAME READY STATUS RESTARTS AGE +my-app-deployment-7fdb6c5f65-abcde 1/1 Running 0 2d1h +another-service-pod-xyz123 2/2 Running 0 5h30m +``` + +### Shortcodes + +Leave Hugo shortcodes in the content when editing. + +Use our custom admonition Hugo shortcode for notes, cautions, or warnings, +with `` as "note", "caution", or "warning": + +```markdown +{{< admonition type="" >}} +... +{{< /admonition >}} +``` + +Use admonitions sparingly. +Only include exceptional information in admonitions. + + diff --git a/Makefile b/Makefile index b6ea79ea13e..e76e5a18940 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,7 @@ GO_RACE_FLAG := $(if $(GO_RACE),-race) GO_BUILD_FLAGS += $(if $(GO_BUILD_DEV),-dev) GO_BUILD_FLAGS += $(if $(GO_BUILD_TAGS),-build-tags=$(GO_BUILD_TAGS)) GO_BUILD_FLAGS += $(GO_RACE_FLAG) +GO_BUILD_FLAGS += $(if $(GO_BUILD_CGO),-cgo-enabled=$(GO_BUILD_CGO)) GO_TEST_FLAGS += $(if $(GO_BUILD_TAGS),-tags=$(GO_BUILD_TAGS)) GIT_BASE = remotes/origin/main @@ -245,6 +246,10 @@ build-backend: ## Build Grafana backend. 
@echo "build backend" $(GO) run build.go $(GO_BUILD_FLAGS) build-backend +.PHONY: build-air +build-air: build-backend + @cp ./bin/grafana ./bin/grafana-air + .PHONY: build-server build-server: ## Build Grafana server. @echo "build server" diff --git a/apps/alerting/alertenrichment/go.mod b/apps/alerting/alertenrichment/go.mod index 429c709a982..0d55e99d1ac 100644 --- a/apps/alerting/alertenrichment/go.mod +++ b/apps/alerting/alertenrichment/go.mod @@ -28,7 +28,7 @@ require ( go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/net v0.44.0 // indirect golang.org/x/text v0.29.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect diff --git a/apps/alerting/alertenrichment/go.sum b/apps/alerting/alertenrichment/go.sum index de070b2e44c..49bebc166ef 100644 --- a/apps/alerting/alertenrichment/go.sum +++ b/apps/alerting/alertenrichment/go.sum @@ -91,8 +91,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/alerting/notifications/definitions/alerting-notifications-manifest.yaml b/apps/alerting/notifications/definitions/alerting-notifications-manifest.yaml index 447c5d5f747..aa40d7c74b3 100644 --- a/apps/alerting/notifications/definitions/alerting-notifications-manifest.yaml +++ b/apps/alerting/notifications/definitions/alerting-notifications-manifest.yaml @@ -5,6 +5,7 @@ metadata: spec: appName: alerting-notifications group: notifications.alerting.grafana.app + preferredVersion: v0alpha1 versions: - kinds: - conversion: false diff --git a/apps/alerting/notifications/go.mod b/apps/alerting/notifications/go.mod index 8956083e35c..6ab1de81ecb 100644 --- a/apps/alerting/notifications/go.mod +++ b/apps/alerting/notifications/go.mod @@ -94,7 +94,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/apps/alerting/notifications/go.sum b/apps/alerting/notifications/go.sum index 2d8f7d8da91..4986bf20c51 100644 --- a/apps/alerting/notifications/go.sum +++ b/apps/alerting/notifications/go.sum @@ -284,8 +284,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go. 
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/alerting/notifications/pkg/apis/alertingnotifications_manifest.go b/apps/alerting/notifications/pkg/apis/alertingnotifications_manifest.go index 3537a6e3e8d..84850cb31a1 100644 --- a/apps/alerting/notifications/pkg/apis/alertingnotifications_manifest.go +++ b/apps/alerting/notifications/pkg/apis/alertingnotifications_manifest.go @@ -34,8 +34,9 @@ var ( ) var appManifestData = app.ManifestData{ - AppName: "alerting-notifications", - Group: "notifications.alerting.grafana.app", + AppName: "alerting-notifications", + Group: "notifications.alerting.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/alerting/rules/definitions/alerting-manifest.yaml b/apps/alerting/rules/definitions/alerting-manifest.yaml index 22808e9deba..836bd2c9a94 100644 --- a/apps/alerting/rules/definitions/alerting-manifest.yaml +++ b/apps/alerting/rules/definitions/alerting-manifest.yaml @@ -5,6 +5,7 @@ metadata: spec: appName: alerting group: rules.alerting.grafana.app + preferredVersion: v0alpha1 versions: - 
kinds: - conversion: false diff --git a/apps/alerting/rules/go.mod b/apps/alerting/rules/go.mod index 418abe0d193..04e54ebd402 100644 --- a/apps/alerting/rules/go.mod +++ b/apps/alerting/rules/go.mod @@ -77,7 +77,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/alerting/rules/go.sum b/apps/alerting/rules/go.sum index dcb8dea6862..6418db2eb84 100644 --- a/apps/alerting/rules/go.sum +++ b/apps/alerting/rules/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/alerting/rules/pkg/apis/alerting_manifest.go 
b/apps/alerting/rules/pkg/apis/alerting_manifest.go index abbc407ddf3..ea4f2530948 100644 --- a/apps/alerting/rules/pkg/apis/alerting_manifest.go +++ b/apps/alerting/rules/pkg/apis/alerting_manifest.go @@ -28,8 +28,9 @@ var ( ) var appManifestData = app.ManifestData{ - AppName: "alerting", - Group: "rules.alerting.grafana.app", + AppName: "alerting", + Group: "rules.alerting.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/correlations/Makefile b/apps/correlations/Makefile index 230bfd4149a..bc8d6d30cb5 100644 --- a/apps/correlations/Makefile +++ b/apps/correlations/Makefile @@ -6,4 +6,5 @@ generate: install-app-sdk update-app-sdk --source=./kinds/ \ --gogenpath=./pkg/apis \ --grouping=group \ + --genoperatorstate=false \ --defencoding=none \ No newline at end of file diff --git a/apps/correlations/go.mod b/apps/correlations/go.mod index 059a22bba7d..0eb82cd5292 100644 --- a/apps/correlations/go.mod +++ b/apps/correlations/go.mod @@ -79,7 +79,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/correlations/go.sum b/apps/correlations/go.sum index dcb8dea6862..6418db2eb84 100644 --- a/apps/correlations/go.sum +++ b/apps/correlations/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod 
h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go index 482d497e154..fe8f518b852 100644 --- a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go +++ b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type CorrelationClient struct { @@ -76,24 +75,6 @@ func (c *CorrelationClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *CorrelationClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus CorrelationStatus, opts resource.UpdateOptions) (*Correlation, error) { - return c.client.Update(ctx, &Correlation{ - TypeMeta: metav1.TypeMeta{ - Kind: CorrelationKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c 
*CorrelationClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go index d10cb114aa8..3191e415977 100644 --- a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go +++ b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_object_gen.go @@ -21,8 +21,6 @@ type Correlation struct { // Spec is the spec of the Correlation Spec CorrelationSpec `json:"spec" yaml:"spec"` - - Status CorrelationStatus `json:"status" yaml:"status"` } func (o *Correlation) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Correlation) SetSpec(spec any) error { } func (o *Correlation) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Correlation) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Correlation) GetSubresource(name string) (any, bool) { func (o *Correlation) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(CorrelationStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type CorrelationStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Correlation) DeepCopyInto(dst *Correlation) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *CorrelationSpec) DeepCopy() *CorrelationSpec { func (s *CorrelationSpec) DeepCopyInto(dst *CorrelationSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates 
a full deep copy of CorrelationStatus -func (s *CorrelationStatus) DeepCopy() *CorrelationStatus { - cpy := &CorrelationStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies CorrelationStatus into another CorrelationStatus object -func (s *CorrelationStatus) DeepCopyInto(dst *CorrelationStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_status_gen.go b/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_status_gen.go deleted file mode 100644 index 5b8bc9f8088..00000000000 --- a/apps/correlations/pkg/apis/correlation/v0alpha1/correlation_status_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -package v0alpha1 - -// +k8s:openapi-gen=true -type CorrelationstatusOperatorState struct { - // lastEvaluation is the ResourceVersion last evaluated - LastEvaluation string `json:"lastEvaluation"` - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - State CorrelationStatusOperatorStateState `json:"state"` - // descriptiveState is an optional more descriptive state field which has no requirements on format - DescriptiveState *string `json:"descriptiveState,omitempty"` - // details contains any extra information that is operator-specific - Details map[string]interface{} `json:"details,omitempty"` -} - -// NewCorrelationstatusOperatorState creates a new CorrelationstatusOperatorState object. -func NewCorrelationstatusOperatorState() *CorrelationstatusOperatorState { - return &CorrelationstatusOperatorState{} -} - -// +k8s:openapi-gen=true -type CorrelationStatus struct { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. 
- OperatorStates map[string]CorrelationstatusOperatorState `json:"operatorStates,omitempty"` - // additionalFields is reserved for future use - AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"` -} - -// NewCorrelationStatus creates a new CorrelationStatus object. -func NewCorrelationStatus() *CorrelationStatus { - return &CorrelationStatus{} -} - -// +k8s:openapi-gen=true -type CorrelationStatusOperatorStateState string - -const ( - CorrelationStatusOperatorStateStateSuccess CorrelationStatusOperatorStateState = "success" - CorrelationStatusOperatorStateStateInProgress CorrelationStatusOperatorStateState = "in_progress" - CorrelationStatusOperatorStateStateFailed CorrelationStatusOperatorStateState = "failed" -) diff --git a/apps/correlations/pkg/apis/correlation_manifest.go b/apps/correlations/pkg/apis/correlation_manifest.go index 293633bff4f..3a784903323 100644 --- a/apps/correlations/pkg/apis/correlation_manifest.go +++ b/apps/correlations/pkg/apis/correlation_manifest.go @@ -10,24 +10,24 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kube-openapi/pkg/spec3" - "github.com/grafana/grafana-app-sdk/app" "github.com/grafana/grafana-app-sdk/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kube-openapi/pkg/spec3" v0alpha1 "github.com/grafana/grafana/apps/correlations/pkg/apis/correlation/v0alpha1" ) var ( - rawSchemaCorrelationv0alpha1 = []byte(`{"ConfigSpec":{"additionalProperties":false,"description":"there was a deprecated field here called type, we will need to move that for conversion and 
provisioning","properties":{"field":{"type":"string"},"target":{"$ref":"#/components/schemas/TargetSpec"},"transformations":{"items":{"$ref":"#/components/schemas/TransformationSpec"},"type":"array"}},"required":["field","target"],"type":"object"},"Correlation":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"CorrelationType":{"enum":["query","external"],"type":"string"},"DataSourceRef":{"additionalProperties":false,"properties":{"group":{"description":"same as pluginId","type":"string"},"name":{"description":"same as grafana uid","type":"string"}},"required":["group","name"],"type":"object"},"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which has no requirements on format","type":"string"},"details":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last evaluated","type":"string"},"state":{"description":"state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine 
evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"TargetSpec":{"additionalProperties":{"additionalProperties":{},"type":"object"},"type":"object"},"TransformationSpec":{"additionalProperties":false,"properties":{"expression":{"type":"string"},"field":{"type":"string"},"mapValue":{"type":"string"},"type":{"type":"string"}},"required":["type","expression","field","mapValue"],"type":"object"},"spec":{"additionalProperties":false,"properties":{"config":{"$ref":"#/components/schemas/ConfigSpec"},"description":{"type":"string"},"label":{"type":"string"},"provisioned":{"type":"boolean"},"source_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"target_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"type":{"$ref":"#/components/schemas/CorrelationType"}},"required":["source_ds_ref","label","config","provisioned","type"],"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"additionalFields is reserved for future use","type":"object"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.","type":"object"}},"type":"object"}}`) + rawSchemaCorrelationv0alpha1 = []byte(`{"ConfigSpec":{"additionalProperties":false,"description":"there was a deprecated field here called type, we will need to move that for conversion and 
provisioning","properties":{"field":{"type":"string"},"target":{"$ref":"#/components/schemas/TargetSpec"},"transformations":{"items":{"$ref":"#/components/schemas/TransformationSpec"},"type":"array"}},"required":["field","target"],"type":"object"},"Correlation":{"properties":{"spec":{"$ref":"#/components/schemas/spec"}},"required":["spec"]},"CorrelationType":{"enum":["query","external"],"type":"string"},"DataSourceRef":{"additionalProperties":false,"properties":{"group":{"description":"same as pluginId","type":"string"},"name":{"description":"same as grafana uid","type":"string"}},"required":["group","name"],"type":"object"},"TargetSpec":{"additionalProperties":{"additionalProperties":{},"type":"object"},"type":"object"},"TransformationSpec":{"additionalProperties":false,"properties":{"expression":{"type":"string"},"field":{"type":"string"},"mapValue":{"type":"string"},"type":{"type":"string"}},"required":["type","expression","field","mapValue"],"type":"object"},"spec":{"additionalProperties":false,"properties":{"config":{"$ref":"#/components/schemas/ConfigSpec"},"description":{"type":"string"},"label":{"type":"string"},"provisioned":{"type":"boolean"},"source_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"target_ds_ref":{"$ref":"#/components/schemas/DataSourceRef"},"type":{"$ref":"#/components/schemas/CorrelationType"}},"required":["source_ds_ref","label","config","provisioned","type"],"type":"object"}}`) versionSchemaCorrelationv0alpha1 app.VersionSchema _ = json.Unmarshal(rawSchemaCorrelationv0alpha1, &versionSchemaCorrelationv0alpha1) ) var appManifestData = app.ManifestData{ - AppName: "correlation", - Group: "correlations.grafana.app", + AppName: "correlation", + Group: "correlations.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts 
b/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts index ff5b1b8f03f..05bbaf69516 100644 --- a/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts +++ b/apps/correlations/plugin/src/generated/correlation/v0alpha1/correlation_object_gen.ts @@ -2,7 +2,6 @@ * This file was generated by grafana-app-sdk. DO NOT EDIT. */ import { Spec } from './types.spec.gen'; -import { Status } from './types.status.gen'; export interface Metadata { name: string; @@ -45,5 +44,4 @@ export interface Correlation { apiVersion: string; metadata: Metadata; spec: Spec; - status: Status; } diff --git a/apps/correlations/plugin/src/generated/correlation/v0alpha1/types.status.gen.ts b/apps/correlations/plugin/src/generated/correlation/v0alpha1/types.status.gen.ts deleted file mode 100644 index 01be8df7961..00000000000 --- a/apps/correlations/plugin/src/generated/correlation/v0alpha1/types.status.gen.ts +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -export interface OperatorState { - // lastEvaluation is the ResourceVersion last evaluated - lastEvaluation: string; - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - state: "success" | "in_progress" | "failed"; - // descriptiveState is an optional more descriptive state field which has no requirements on format - descriptiveState?: string; - // details contains any extra information that is operator-specific - details?: Record; -} - -export const defaultOperatorState = (): OperatorState => ({ - lastEvaluation: "", - state: "success", -}); - -export interface Status { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. 
- operatorStates?: Record; - // additionalFields is reserved for future use - additionalFields?: Record; -} - -export const defaultStatus = (): Status => ({ -}); - diff --git a/apps/dashboard/go.mod b/apps/dashboard/go.mod index 4a0f68552a6..bb988e36d77 100644 --- a/apps/dashboard/go.mod +++ b/apps/dashboard/go.mod @@ -4,7 +4,7 @@ go 1.24.6 require ( cuelang.org/go v0.11.1 - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 github.com/grafana/grafana-app-sdk v0.46.0 github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana-plugin-sdk-go v0.279.0 @@ -51,7 +51,7 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/otel-profiling-go v0.5.1 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect @@ -134,7 +134,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/apps/dashboard/go.sum b/apps/dashboard/go.sum index 7b6658192df..ebff69a4fd5 100644 --- a/apps/dashboard/go.sum +++ b/apps/dashboard/go.sum @@ -95,10 +95,10 @@ github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25d github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/mux 
v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.46.0 h1:gvzQvCQgZJ/73BfAcbDt/6TAMhnVikVPxZt/UwDl+oc= @@ -386,8 +386,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf 
v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/dashboard/pkg/apis/dashboard_manifest.go b/apps/dashboard/pkg/apis/dashboard_manifest.go index 1e8f261e65b..3ea4e0284b3 100644 --- a/apps/dashboard/pkg/apis/dashboard_manifest.go +++ b/apps/dashboard/pkg/apis/dashboard_manifest.go @@ -21,8 +21,9 @@ import ( ) var appManifestData = app.ManifestData{ - AppName: "dashboard", - Group: "dashboard.grafana.app", + AppName: "dashboard", + Group: "dashboard.grafana.app", + PreferredVersion: "v1beta1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/dashboard/pkg/migration/frontend_defaults.go b/apps/dashboard/pkg/migration/frontend_defaults.go index 6cce349a338..6f4cbbf87a4 100644 --- a/apps/dashboard/pkg/migration/frontend_defaults.go +++ b/apps/dashboard/pkg/migration/frontend_defaults.go @@ -198,7 +198,7 @@ func sortPanelsByGridPos(dashboard map[string]interface{}) { return } - sort.Slice(panels, func(i, j int) bool { + sort.SliceStable(panels, func(i, j int) bool { panelA := panels[i] panelB := panels[j] @@ -831,7 +831,7 @@ func cleanupPanelList(panels []interface{}) { // sortPanelsByGridPosition sorts panels by grid position (matches frontend sortPanelsByGridPos behavior) func sortPanelsByGridPosition(panels []interface{}) { - sort.Slice(panels, func(i, j int) bool { + sort.SliceStable(panels, func(i, j int) bool { panelA, okA := panels[i].(map[string]interface{}) panelB, okB := panels[j].(map[string]interface{}) if !okA || !okB { diff --git a/apps/dashboard/pkg/migration/schemaversion/v16.go 
b/apps/dashboard/pkg/migration/schemaversion/v16.go index f360fe246ad..da6db1dcc1a 100644 --- a/apps/dashboard/pkg/migration/schemaversion/v16.go +++ b/apps/dashboard/pkg/migration/schemaversion/v16.go @@ -49,10 +49,15 @@ func upgradeToGridLayout(dashboard map[string]interface{}) { maxPanelID := getMaxPanelID(rows) nextRowID := maxPanelID + 1 - // Get existing panels - var finalPanels []interface{} - if existingPanels, ok := dashboard["panels"].([]interface{}); ok { - finalPanels = existingPanels + // Match frontend: dashboard.panels already exists with top-level panels + // The frontend's this.dashboard.panels is initialized in the constructor with existing panels + // Then upgradeToGridLayout adds more panels to it + + // Initialize panels array - make a copy to avoid modifying the original + panels := []interface{}{} + if existingPanels, ok := dashboard["panels"].([]interface{}); ok && len(existingPanels) > 0 { + // Copy existing panels to preserve order + panels = append(panels, existingPanels...) 
} // Add special "row" panels if even one row is collapsed, repeated or has visible title (line 1028 in TS) @@ -72,7 +77,14 @@ func upgradeToGridLayout(dashboard map[string]interface{}) { height := getRowHeight(row) rowGridHeight := getGridHeight(height) - isCollapsed := GetBoolValue(row, "collapse") + // Check if collapse property exists and get its value + collapseValue, hasCollapseProperty := row["collapse"] + isCollapsed := false + if hasCollapseProperty { + if b, ok := collapseValue.(bool); ok { + isCollapsed = b + } + } var rowPanel map[string]interface{} @@ -110,9 +122,9 @@ func upgradeToGridLayout(dashboard map[string]interface{}) { }, } - // Set collapsed property only if the original row had a collapse property - // This matches the frontend behavior: rowPanel.collapsed = row.collapse - if _, hasCollapse := row["collapse"]; hasCollapse { + // Match frontend behavior: rowPanel.collapsed = row.collapse (line 1065 in TS) + // Only set collapsed property if the original row had a collapse property + if hasCollapseProperty { rowPanel["collapsed"] = isCollapsed } nextRowID++ @@ -128,23 +140,13 @@ func upgradeToGridLayout(dashboard map[string]interface{}) { continue } - // Set default span (line 1063 in TS) - span := GetFloatValue(panel, "span", defaultPanelSpan) - - // Handle minSpan conversion (lines 1064-1066 in TS) - if minSpan, hasMinSpan := panel["minSpan"]; hasMinSpan { - if minSpanFloat, ok := ConvertToFloat(minSpan); ok && minSpanFloat > 0 { - panel["minSpan"] = int(math.Min(float64(gridColumnCount), (float64(gridColumnCount)/12.0)*minSpanFloat)) - } + // Match frontend logic: panel.span = panel.span || DEFAULT_PANEL_SPAN (line 1082 in TS) + span := GetFloatValue(panel, "span", 0) + if span == 0 { + span = defaultPanelSpan } - panelWidth := int(math.Floor(span * widthFactor)) - panelHeight := rowGridHeight - if panelHeightValue, hasHeight := panel["height"]; hasHeight { - if h, ok := ConvertToFloat(panelHeightValue); ok { - panelHeight = 
getGridHeight(h) - } - } + panelWidth, panelHeight := calculatePanelDimensionsFromSpan(span, panel, widthFactor, rowGridHeight) panelPos := rowArea.getPanelPosition(panelHeight, panelWidth) yPos = rowArea.yPos @@ -161,21 +163,21 @@ func upgradeToGridLayout(dashboard map[string]interface{}) { // Remove span (line 1080 in TS) delete(panel, "span") - // Exact logic from lines 1082-1086 in TS + // Match frontend logic: lines 1101-1105 in TS if rowPanel != nil && isCollapsed { - // Add to collapsed row's nested panels + // Add to collapsed row's nested panels (line 1102) if rowPanelPanels, ok := rowPanel["panels"].([]interface{}); ok { rowPanel["panels"] = append(rowPanelPanels, panel) } } else { - // Add directly to dashboard panels - finalPanels = append(finalPanels, panel) + // Add directly to panels array like frontend (line 1104) + panels = append(panels, panel) } } - // Add row panel after processing all panels (lines 1089-1091 in TS) + // Add row panel after regular panels from this row (lines 1108-1110 in TS) if rowPanel != nil { - finalPanels = append(finalPanels, rowPanel) + panels = append(panels, rowPanel) } // Update yPos (lines 1093-1095 in TS) @@ -185,7 +187,7 @@ func upgradeToGridLayout(dashboard map[string]interface{}) { } // Update the dashboard - dashboard["panels"] = finalPanels + dashboard["panels"] = panels delete(dashboard, "rows") } @@ -315,3 +317,24 @@ func getGridHeight(height float64) int { } return int(math.Ceil(height / panelHeightStep)) } + +func calculatePanelDimensionsFromSpan(span float64, panel map[string]interface{}, widthFactor float64, defaultHeight int) (int, int) { + // span should already be normalized by caller (line 1082 in DashboardMigrator.ts) + + if minSpan, hasMinSpan := panel["minSpan"]; hasMinSpan { + if minSpanFloat, ok := ConvertToFloat(minSpan); ok && minSpanFloat > 0 { + panel["minSpan"] = int(math.Min(float64(gridColumnCount), (float64(gridColumnCount)/12.0)*minSpanFloat)) + } + } + + panelWidth := int(math.Floor(span 
* widthFactor)) + panelHeight := defaultHeight + + if panelHeightValue, hasHeight := panel["height"]; hasHeight { + if h, ok := ConvertToFloat(panelHeightValue); ok { + panelHeight = getGridHeight(h) + } + } + + return panelWidth, panelHeight +} diff --git a/apps/dashboard/pkg/migration/schemaversion/v16_test.go b/apps/dashboard/pkg/migration/schemaversion/v16_test.go index 4ce1b423af3..007e912fc85 100644 --- a/apps/dashboard/pkg/migration/schemaversion/v16_test.go +++ b/apps/dashboard/pkg/migration/schemaversion/v16_test.go @@ -1532,6 +1532,123 @@ func TestV16(t *testing.T) { // rows field should be removed }, }, + { + name: "should handle span zero by defaulting to DEFAULT_PANEL_SPAN", + input: map[string]interface{}{ + "schemaVersion": 15, + "rows": []interface{}{ + map[string]interface{}{ + "collapse": false, + "showTitle": true, // Need this to create row panel + "title": "Test Row", + "height": 250, + "panels": []interface{}{ + map[string]interface{}{ + "id": 1, + "type": "graph", + "span": 0, // This should be defaulted to 4 (DEFAULT_PANEL_SPAN) + }, + map[string]interface{}{ + "id": 2, + "type": "stat", + "span": 6, // Normal span value + }, + }, + }, + }, + }, + expected: map[string]interface{}{ + "schemaVersion": 16, + "panels": []interface{}{ + map[string]interface{}{ + "id": 1, + "type": "graph", + "gridPos": map[string]interface{}{ + "x": 0, + "y": 1, + "w": 8, // span 0 -> DEFAULT_PANEL_SPAN (4) -> 4 * 2 = 8 width + "h": 7, // default height + }, + }, + map[string]interface{}{ + "id": 2, + "type": "stat", + "gridPos": map[string]interface{}{ + "x": 8, // After first panel + "y": 1, + "w": 12, // span 6 -> 6 * 2 = 12 width + "h": 7, // default height + }, + }, + // Row panel should be created because showTitle is true + map[string]interface{}{ + "id": 3, + "type": "row", + "title": "Test Row", + "collapsed": false, // Set because input has "collapse": false + "repeat": "", + "panels": []interface{}{}, + "gridPos": map[string]interface{}{ + "x": 0, + 
"y": 0, + "w": 24, + "h": 7, + }, + }, + }, + }, + }, + { + name: "should not set collapsed property when input row has no collapse property", + input: map[string]interface{}{ + "schemaVersion": 15, + "rows": []interface{}{ + map[string]interface{}{ + // No "collapse" property in input + "showTitle": true, + "title": "Test Row", + "height": 250, + "panels": []interface{}{ + map[string]interface{}{ + "id": 1, + "type": "graph", + "span": 12, + }, + }, + }, + }, + }, + expected: map[string]interface{}{ + "schemaVersion": 16, + "panels": []interface{}{ + map[string]interface{}{ + "id": 1, + "type": "graph", + "gridPos": map[string]interface{}{ + "x": 0, + "y": 1, + "w": 24, // span 12 -> 12 * 2 = 24 width + "h": 7, // default height + }, + }, + // Row panel should be created because showTitle is true + map[string]interface{}{ + "id": 2, + "type": "row", + "title": "Test Row", + // No "collapsed" property because input had no "collapse" property + "repeat": "", + "panels": []interface{}{}, + "gridPos": map[string]interface{}{ + "x": 0, + "y": 0, + "w": 24, + "h": 7, + }, + }, + }, + }, + }, } runMigrationTests(t, tests, schemaversion.V16) diff --git a/apps/dashboard/pkg/migration/testdata/input/v16.span_zero_demo.json b/apps/dashboard/pkg/migration/testdata/input/v16.span_zero_demo.json new file mode 100644 index 00000000000..5dbb57fa244 --- /dev/null +++ b/apps/dashboard/pkg/migration/testdata/input/v16.span_zero_demo.json @@ -0,0 +1,687 @@ +{ + "__requires": [ + { + "id": "grafana", + "name": "Grafana", + "type": "grafana", + "version": "8.0.0" + } + ], + "annotations": { + "list": [] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + { + "icon": "external link", + "targetBlank": true, + "title": "External Documentation", + "type": "link", + "url": "https://example.com/docs" + } + ], + "panels": [ + { + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "options": { + "content": "This dashboard 
demonstrates various monitoring components for application observability and performance metrics.\n", + "mode": "markdown" + }, + "title": "Application Monitoring", + "type": "text" + } + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "height": "250px", + "panels": [ + { + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 6, + "options": { + "content": "This service handles background processing tasks for the application system. It manages various types of operations including data synchronization, resource management, and batch processing.\n\nSupported operation types:\n1. Sync: Synchronizes data between different systems\n2. Process: Handles batch data processing tasks\n3. Cleanup: Removes outdated or temporary resources\n4. Update: Applies configuration changes across services\n\nService dependencies:\n- Data API: For reading and writing application data\n- Configuration Service: For managing system settings\n- Queue Service: For handling task scheduling\n- Storage Service: For persistent data management\n- Auth Service: For authentication and authorization\n- Metrics Service: For collecting operational statistics\n", + "mode": "markdown" + }, + "span": 0, + "title": "Service Overview", + "type": "text" + }, + { + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 7, + "options": { + "content": "Error monitoring helps identify issues in the system. 
This section displays error logs and success rates for operations.", + "mode": "markdown" + }, + "span": 0, + "title": "Error Monitoring", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": 0 + }, + { + "color": "yellow", + "value": 0.95 + }, + { + "color": "green", + "value": 1 + } + ] + }, + "unit": "percentunit" + } + }, + "gridPos": { + "h": 9, + "w": 3, + "x": 0, + "y": 19 + }, + "id": 8, + "span": 0, + "targets": [ + { + "expr": "sum by (action) (app_jobs_processed_total{outcome=\"success\", cluster=\"$cluster\", namespace=\"default\"})\n/\nsum by (action) (app_jobs_processed_total{cluster=\"$cluster\", namespace=\"default\"})\n", + "legendFormat": "{{action}}" + } + ], + "title": "Job Success Rate", + "type": "stat" + }, + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "gridPos": { + "h": 9, + "w": 10, + "x": 3, + "y": 19 + }, + "id": 9, + "options": { + "enableLogDetails": true, + "showTime": false, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "span": 0, + "targets": [ + { + "expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt | level=\"error\"" + } + ], + "title": "Errors", + "type": "logs" + }, + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "gridPos": { + "h": 9, + "w": 11, + "x": 13, + "y": 19 + }, + "id": 10, + "options": { + "enableLogDetails": true, + "showTime": false, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "span": 0, + "targets": [ + { + "expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt" + } + ], + "title": "All", + "type": "logs" + }, + { + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 11, + "options": { + "content": "Performance monitoring examines factors that affect system response times, including operation duration, queue 
lengths, and processing delays. This section provides metrics and traces for performance analysis.\n", + "mode": "markdown" + }, + "span": 0, + "title": "Performance Analysis", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Number of concurrent processing threads available for handling operations", + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 31 + }, + "id": 12, + "span": 0, + "targets": [ + { + "expr": "max(app_worker_threads_active{cluster=\"$cluster\", namespace=\"default\"})", + "instant": true + } + ], + "title": "Concurrent Job Drivers", + "type": "stat" + }, + { + "datasource": { + "type": "tempo", + "uid": "${tempo}" + }, + "gridPos": { + "h": 6, + "w": 19, + "x": 5, + "y": 31 + }, + "id": 13, + "span": 0, + "targets": [ + { + "filters": [ + { + "id": "span-name", + "operator": "=", + "scope": "span", + "tag": "name", + "value": [ + "provisioning.sync.process" + ] + }, + { + "id": "k8s-cluster-name", + "operator": "=", + "scope": "resource", + "tag": "k8s.cluster.name", + "value": [ + "$cluster" + ] + } + ], + "query": "{name=\"app.operation.process\"}", + "queryType": "traceqlSearch" + } + ], + "title": "Recent Operation Traces", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed", + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 2 + }, + { + "color": "red", + "value": 5 + } + ] + }, + "unit": "s" + } + }, + "gridPos": { + "h": 10, + "w": 8, + "x": 0, + "y": 55 + }, + "id": 14, + "span": 0, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and 
on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0", + "legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.9, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0", + "legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0", + "legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0", + "legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}", + "refId": "E" + } + ], + "timeFrom": "7d", + "title": "7d avg of job durations", + "transformations": [ + { + "id": "reduce", + "options": { + "mode": "seriesToRows", + "reducers": [ + "mean" + ] + } + }, + { + "id": "seriesToRows" + }, + { + "id": "organize", + "options": { + "renameByName": { + "Field": "Type", + "Mean": "Avg Duration", + "Metric": "Legend", + "Value": 
"Duration" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed", + "gridPos": { + "h": 10, + "w": 16, + "x": 8, + "y": 55 + }, + "id": 15, + "span": 0, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}", + "refId": "E" + } + ], + "title": "Job Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Total number of jobs waiting to be processed", + "gridPos": { + "h": 5, + "w": 4, + "x": 0, + "y": 65 + }, + "id": 16, + "span": 0, + "targets": [ + { + "expr": "clamp_min(sum(app_operation_queue_size{cluster=\"$cluster\", namespace=\"default\"}), 0)", + "legendFormat": "Queue size" + } + ], + "title": "Queue Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "${prom}" + }, + "fieldConfig": { + "defaults": { + "unit": "s" + } + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 4, + "y": 65 + }, + "id": 17, + "span": 0, + "targets": [ + { + "expr": "avg(histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le)))", + "legendFormat": "Queue size" + } + ], + "timeFrom": "7d", + "title": "7d avg Queue Wait Time", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "How long a job is in the queue before being picked up", + "gridPos": { + "h": 5, + "w": 16, + "x": 8, + "y": 65 + }, + "id": 18, + "span": 0, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.99", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.95", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.5", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.1, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.1", + "refId": "E" + } + ], + "title": "Queue Wait Time", + "type": "timeseries" + }, + { + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 19, + "options": { + "content": "Resource utilization monitoring for application containers", + "mode": "markdown" + }, + "span": 0, + "title": "Resource Monitoring", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 9, + "w": 7, + "x": 0, + "y": 55 + }, + 
"id": 20, + "span": 0, + "targets": [ + { + "expr": "count by (cluster, channel)(label_replace(label_replace(kube_pod_container_info{namespace=\"default\", container=\"app-worker\", pod=~\"app-worker.*\", cluster=~\"$cluster\"}, \"version\", \"$1\", \"image\", \".+:(.+)\"), \"channel\", \"$1\", \"container\", \".+-(.+)\"))", + "legendFormat": "{{cluster}}" + } + ], + "title": "Running Pod(s)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 7, + "y": 55 + }, + "id": 21, + "span": 0, + "targets": [ + { + "expr": "max(kube_pod_container_resource_requests{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})", + "legendFormat": "Memory Request" + }, + { + "expr": "max(kube_pod_container_resource_limits{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})", + "legendFormat": "Memory Limit" + }, + { + "expr": "max(container_memory_usage_bytes{namespace=\"default\",cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"}) by (pod)", + "legendFormat": "Container usage {{pod}}" + } + ], + "title": "Memory Utilization", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 15, + "y": 55 + }, + "id": 22, + "span": 0, + "targets": [ + { + "expr": "sum(irate(container_cpu_usage_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container, cpu)", + "legendFormat": "Usage {{pod}}" + }, + { + "expr": "sum(irate(container_cpu_cfs_throttled_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container)", + "legendFormat": "Throttling {{pod}}" + }, + { + "expr": 
"max(kube_pod_container_resource_limits{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})", + "legendFormat": "CPU limit" + }, + { + "expr": "max(kube_pod_container_resource_requests{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})", + "legendFormat": "CPU request" + } + ], + "title": "CPU Utilization", + "type": "timeseries" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Application Service", + "titleSize": "h6" + } + ], + "schemaVersion": 15, + "style": "dark", + "tags": [ + "as-code" + ], + "templating": { + "list": [ + { + "current": { + "value": "prometheus-datasource" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "value": "prometheus-datasource" + }, + "name": "prom", + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "value": "loki-datasource" + }, + "name": "loki", + "query": "loki", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "text": "tempo-datasource", + "value": "tempo-datasource" + }, + "name": "tempo", + "query": "tempo", + "refresh": 1, + "regex": ".*tempo.*", + "type": "datasource" + }, + { + "current": { + "text": "demo-cluster", + "value": "demo-cluster" + }, + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "name": "cluster", + "query": "label_values(app_worker_threads_active,cluster)", + "refresh": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + 
}, + "timezone": "utc", + "title": "Span Zero Demo Dashboard", + "uid": "span-zero-demo-dashboard", + "version": 0 +} diff --git a/apps/dashboard/pkg/migration/testdata/output/latest_version/v16.span_zero_demo.v42.json b/apps/dashboard/pkg/migration/testdata/output/latest_version/v16.span_zero_demo.v42.json new file mode 100644 index 00000000000..91012684ebf --- /dev/null +++ b/apps/dashboard/pkg/migration/testdata/output/latest_version/v16.span_zero_demo.v42.json @@ -0,0 +1,881 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations \u0026 Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [ + { + "icon": "external link", + "targetBlank": true, + "title": "External Documentation", + "type": "link", + "url": "https://example.com/docs" + } + ], + "panels": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "content": "This dashboard demonstrates various monitoring components for application observability and performance metrics.\n", + "mode": "markdown" + }, + "targets": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "refId": "A" + } + ], + "title": "Application Monitoring", + "type": "text" + }, + { + "collapsed": false, + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 23, + "panels": [], + "targets": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "refId": "A" + } + ], + "title": "Application Service", + "type": "row" + }, + { + "datasource": { + "apiVersion": "v1", + 
"type": "prometheus", + "uid": "default-ds-uid" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 6, + "options": { + "content": "This service handles background processing tasks for the application system. It manages various types of operations including data synchronization, resource management, and batch processing.\n\nSupported operation types:\n1. Sync: Synchronizes data between different systems\n2. Process: Handles batch data processing tasks\n3. Cleanup: Removes outdated or temporary resources\n4. Update: Applies configuration changes across services\n\nService dependencies:\n- Data API: For reading and writing application data\n- Configuration Service: For managing system settings\n- Queue Service: For handling task scheduling\n- Storage Service: For persistent data management\n- Auth Service: For authentication and authorization\n- Metrics Service: For collecting operational statistics\n", + "mode": "markdown" + }, + "targets": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "refId": "A" + } + ], + "title": "Service Overview", + "type": "text" + }, + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 7, + "options": { + "content": "Error monitoring helps identify issues in the system. 
This section displays error logs and success rates for operations.", + "mode": "markdown" + }, + "targets": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "refId": "A" + } + ], + "title": "Error Monitoring", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": 0 + }, + { + "color": "yellow", + "value": 0.95 + }, + { + "color": "green", + "value": 1 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 8, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "sum by (action) (app_jobs_processed_total{outcome=\"success\", cluster=\"$cluster\", namespace=\"default\"})\n/\nsum by (action) (app_jobs_processed_total{cluster=\"$cluster\", namespace=\"default\"})\n", + "legendFormat": "{{action}}", + "refId": "A" + } + ], + "title": "Job Success Rate", + "type": "stat" + }, + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 9, + "options": { + "enableLogDetails": true, + "showTime": false, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt | level=\"error\"", + "refId": "A" + } + ], + "title": "Errors", + "type": "logs" + }, + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 8 + }, + "id": 10, + "options": { + "enableLogDetails": true, + "showTime": false, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "expr": "{namespace=\"default\", 
cluster=\"$cluster\", job=\"app-service\"} | logfmt", + "refId": "A" + } + ], + "title": "All", + "type": "logs" + }, + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 11, + "options": { + "content": "Performance monitoring examines factors that affect system response times, including operation duration, queue lengths, and processing delays. This section provides metrics and traces for performance analysis.\n", + "mode": "markdown" + }, + "targets": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "refId": "A" + } + ], + "title": "Performance Analysis", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Number of concurrent processing threads available for handling operations", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 15 + }, + "id": 12, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "max(app_worker_threads_active{cluster=\"$cluster\", namespace=\"default\"})", + "instant": true, + "refId": "A" + } + ], + "title": "Concurrent Job Drivers", + "type": "stat" + }, + { + "datasource": { + "type": "tempo", + "uid": "${tempo}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 15 + }, + "id": 13, + "targets": [ + { + "datasource": { + "type": "tempo", + "uid": "${tempo}" + }, + "filters": [ + { + "id": "span-name", + "operator": "=", + "scope": "span", + "tag": "name", + "value": [ + "provisioning.sync.process" + ] + }, + { + "id": "k8s-cluster-name", + "operator": "=", + "scope": "resource", + "tag": "k8s.cluster.name", + "value": [ + "$cluster" + ] + } + ], + "query": "{name=\"app.operation.process\"}", + "queryType": "traceqlSearch", + "refId": "A" + } + ], + "title": "Recent Operation Traces", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"${prom}" + }, + "description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed", + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 2 + }, + { + "color": "red", + "value": 5 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 15 + }, + "id": 14, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.9, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + 
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}", + "refId": "E" + } + ], + "timeFrom": "7d", + "title": "7d avg of job durations", + "transformations": [ + { + "id": "reduce", + "options": { + "mode": "seriesToRows", + "reducers": [ + "mean" + ] + } + }, + { + "id": "seriesToRows" + }, + { + "id": "organize", + "options": { + "renameByName": { + "Field": "Type", + "Mean": "Avg Duration", + "Metric": "Legend", + "Value": "Duration" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 22 + }, + "id": 15, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}", + "refId": "C" 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}", + "refId": "E" + } + ], + "title": "Job Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Total number of jobs waiting to be processed", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 22 + }, + "id": 16, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "clamp_min(sum(app_operation_queue_size{cluster=\"$cluster\", namespace=\"default\"}), 0)", + "legendFormat": "Queue size", + "refId": "A" + } + ], + "title": "Queue Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 22 + }, + "id": 17, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "avg(histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le)))", + "legendFormat": "Queue size", + "refId": "A" + } + ], + "timeFrom": "7d", + "title": "7d avg Queue Wait Time", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "How long a job is in the queue before being picked up", + "gridPos": { + "h": 
7, + "w": 8, + "x": 0, + "y": 29 + }, + "id": 18, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.99, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.99", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.95, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.95", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.5", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "histogram_quantile(0.1, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.1", + "refId": "E" + } + ], + "title": "Queue Wait Time", + "type": "timeseries" + }, + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 29 + }, + "id": 19, + "options": { + "content": "Resource utilization monitoring for application containers", + "mode": "markdown" + }, + "targets": [ + { + "datasource": { + "apiVersion": "v1", + "type": "prometheus", + "uid": "default-ds-uid" + }, + "refId": "A" + } + ], + "title": "Resource Monitoring", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 29 + }, + "id": 20, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "count by (cluster, 
channel)(label_replace(label_replace(kube_pod_container_info{namespace=\"default\", container=\"app-worker\", pod=~\"app-worker.*\", cluster=~\"$cluster\"}, \"version\", \"$1\", \"image\", \".+:(.+)\"), \"channel\", \"$1\", \"container\", \".+-(.+)\"))", + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "title": "Running Pod(s)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 36 + }, + "id": 21, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "max(kube_pod_container_resource_requests{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})", + "legendFormat": "Memory Request", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "max(kube_pod_container_resource_limits{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})", + "legendFormat": "Memory Limit", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "max(container_memory_usage_bytes{namespace=\"default\",cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"}) by (pod)", + "legendFormat": "Container usage {{pod}}", + "refId": "C" + } + ], + "title": "Memory Utilization", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 36 + }, + "id": 22, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "sum(irate(container_cpu_usage_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container, cpu)", + "legendFormat": "Usage {{pod}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "${prom}" + }, + "expr": "sum(irate(container_cpu_cfs_throttled_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container)", + "legendFormat": "Throttling {{pod}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "max(kube_pod_container_resource_limits{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})", + "legendFormat": "CPU limit", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "expr": "max(kube_pod_container_resource_requests{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})", + "legendFormat": "CPU request", + "refId": "D" + } + ], + "title": "CPU Utilization", + "type": "timeseries" + } + ], + "refresh": "10s", + "schemaVersion": 42, + "tags": [ + "as-code" + ], + "templating": { + "list": [ + { + "current": { + "value": "prometheus-datasource" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "value": "prometheus-datasource" + }, + "name": "prom", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "value": "loki-datasource" + }, + "name": "loki", + "options": [], + "query": "loki", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "text": "tempo-datasource", + "value": "tempo-datasource" + }, + "name": "tempo", + "options": [], + "query": "tempo", + "refresh": 1, + "regex": ".*tempo.*", + "type": "datasource" + }, + { + "current": { + "text": "demo-cluster", + "value": "demo-cluster" + }, + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "name": "cluster", + "options": [], + "query": 
"label_values(app_worker_threads_active,cluster)", + "refresh": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "utc", + "title": "Span Zero Demo Dashboard", + "uid": "span-zero-demo-dashboard", + "weekStart": "" +} \ No newline at end of file diff --git a/apps/dashboard/pkg/migration/testdata/output/single_version/v16.span_zero_demo.v16.json b/apps/dashboard/pkg/migration/testdata/output/single_version/v16.span_zero_demo.v16.json new file mode 100644 index 00000000000..089f4ac16d3 --- /dev/null +++ b/apps/dashboard/pkg/migration/testdata/output/single_version/v16.span_zero_demo.v16.json @@ -0,0 +1,694 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations \u0026 Alerts", + "type": "dashboard" + } + ] + }, + "editable": false, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [ + { + "icon": "external link", + "targetBlank": true, + "title": "External Documentation", + "type": "link", + "url": "https://example.com/docs" + } + ], + "panels": [ + { + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "content": "This dashboard demonstrates various monitoring components for application observability and performance metrics.\n", + "mode": "markdown" + }, + "title": "Application Monitoring", + "type": "text" + }, + { + "collapsed": false, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 23, + "panels": [], + "title": "Application Service", + "type": "row" + }, + { + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 6, + "options": { + "content": "This service handles background processing tasks for the application system. 
It manages various types of operations including data synchronization, resource management, and batch processing.\n\nSupported operation types:\n1. Sync: Synchronizes data between different systems\n2. Process: Handles batch data processing tasks\n3. Cleanup: Removes outdated or temporary resources\n4. Update: Applies configuration changes across services\n\nService dependencies:\n- Data API: For reading and writing application data\n- Configuration Service: For managing system settings\n- Queue Service: For handling task scheduling\n- Storage Service: For persistent data management\n- Auth Service: For authentication and authorization\n- Metrics Service: For collecting operational statistics\n", + "mode": "markdown" + }, + "title": "Service Overview", + "type": "text" + }, + { + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 7, + "options": { + "content": "Error monitoring helps identify issues in the system. This section displays error logs and success rates for operations.", + "mode": "markdown" + }, + "title": "Error Monitoring", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": 0 + }, + { + "color": "yellow", + "value": 0.95 + }, + { + "color": "green", + "value": 1 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 8, + "targets": [ + { + "expr": "sum by (action) (app_jobs_processed_total{outcome=\"success\", cluster=\"$cluster\", namespace=\"default\"})\n/\nsum by (action) (app_jobs_processed_total{cluster=\"$cluster\", namespace=\"default\"})\n", + "legendFormat": "{{action}}", + "refId": "A" + } + ], + "title": "Job Success Rate", + "type": "stat" + }, + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 9, + "options": { 
+ "enableLogDetails": true, + "showTime": false, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt | level=\"error\"", + "refId": "A" + } + ], + "title": "Errors", + "type": "logs" + }, + { + "datasource": { + "type": "loki", + "uid": "${loki}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 8 + }, + "id": 10, + "options": { + "enableLogDetails": true, + "showTime": false, + "sortOrder": "Descending", + "wrapLogMessage": true + }, + "targets": [ + { + "expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt", + "refId": "A" + } + ], + "title": "All", + "type": "logs" + }, + { + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 11, + "options": { + "content": "Performance monitoring examines factors that affect system response times, including operation duration, queue lengths, and processing delays. This section provides metrics and traces for performance analysis.\n", + "mode": "markdown" + }, + "title": "Performance Analysis", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Number of concurrent processing threads available for handling operations", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 15 + }, + "id": 12, + "targets": [ + { + "expr": "max(app_worker_threads_active{cluster=\"$cluster\", namespace=\"default\"})", + "instant": true, + "refId": "A" + } + ], + "title": "Concurrent Job Drivers", + "type": "stat" + }, + { + "datasource": { + "type": "tempo", + "uid": "${tempo}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 15 + }, + "id": 13, + "targets": [ + { + "filters": [ + { + "id": "span-name", + "operator": "=", + "scope": "span", + "tag": "name", + "value": [ + "provisioning.sync.process" + ] + }, + { + "id": "k8s-cluster-name", + "operator": "=", + "scope": "resource", + "tag": "k8s.cluster.name", + "value": [ 
+ "$cluster" + ] + } + ], + "query": "{name=\"app.operation.process\"}", + "queryType": "traceqlSearch", + "refId": "A" + } + ], + "title": "Recent Operation Traces", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed", + "fieldConfig": { + "defaults": { + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 2 + }, + { + "color": "red", + "value": 5 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 15 + }, + "id": 14, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.9, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by 
(resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0", + "legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}", + "refId": "E" + } + ], + "timeFrom": "7d", + "title": "7d avg of job durations", + "transformations": [ + { + "id": "reduce", + "options": { + "mode": "seriesToRows", + "reducers": [ + "mean" + ] + } + }, + { + "id": "seriesToRows" + }, + { + "id": "organize", + "options": { + "renameByName": { + "Field": "Type", + "Mean": "Avg Duration", + "Metric": "Legend", + "Value": "Duration" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 22 + }, + "id": 15, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, 
resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))", + "legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}", + "refId": "E" + } + ], + "title": "Job Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "Total number of jobs waiting to be processed", + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 22 + }, + "id": 16, + "targets": [ + { + "expr": "clamp_min(sum(app_operation_queue_size{cluster=\"$cluster\", namespace=\"default\"}), 0)", + "legendFormat": "Queue size", + "refId": "A" + } + ], + "title": "Queue Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 22 + }, + "id": 17, + "targets": [ + { + "expr": "avg(histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le)))", + "legendFormat": "Queue size", + "refId": "A" + } + ], + "timeFrom": "7d", + "title": "7d avg Queue Wait Time", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "description": "How long a job is in the queue before being picked up", + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 29 + }, + "id": 18, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.99", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", 
namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.95", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.5", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.1, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))", + "legendFormat": "q0.1", + "refId": "E" + } + ], + "title": "Queue Wait Time", + "type": "timeseries" + }, + { + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 29 + }, + "id": 19, + "options": { + "content": "Resource utilization monitoring for application containers", + "mode": "markdown" + }, + "title": "Resource Monitoring", + "type": "text" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 29 + }, + "id": 20, + "targets": [ + { + "expr": "count by (cluster, channel)(label_replace(label_replace(kube_pod_container_info{namespace=\"default\", container=\"app-worker\", pod=~\"app-worker.*\", cluster=~\"$cluster\"}, \"version\", \"$1\", \"image\", \".+:(.+)\"), \"channel\", \"$1\", \"container\", \".+-(.+)\"))", + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "title": "Running Pod(s)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 36 + }, + "id": 21, + "targets": [ + { + "expr": "max(kube_pod_container_resource_requests{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})", + "legendFormat": "Memory Request", + "refId": "A" + }, + { + "expr": "max(kube_pod_container_resource_limits{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})", + "legendFormat": "Memory Limit", + "refId": 
"B" + }, + { + "expr": "max(container_memory_usage_bytes{namespace=\"default\",cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"}) by (pod)", + "legendFormat": "Container usage {{pod}}", + "refId": "C" + } + ], + "title": "Memory Utilization", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 36 + }, + "id": 22, + "targets": [ + { + "expr": "sum(irate(container_cpu_usage_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container, cpu)", + "legendFormat": "Usage {{pod}}", + "refId": "A" + }, + { + "expr": "sum(irate(container_cpu_cfs_throttled_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container)", + "legendFormat": "Throttling {{pod}}", + "refId": "B" + }, + { + "expr": "max(kube_pod_container_resource_limits{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})", + "legendFormat": "CPU limit", + "refId": "C" + }, + { + "expr": "max(kube_pod_container_resource_requests{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})", + "legendFormat": "CPU request", + "refId": "D" + } + ], + "title": "CPU Utilization", + "type": "timeseries" + } + ], + "refresh": "10s", + "schemaVersion": 16, + "tags": [ + "as-code" + ], + "templating": { + "list": [ + { + "current": { + "value": "prometheus-datasource" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "value": "prometheus-datasource" + }, + "name": "prom", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + 
"current": { + "value": "loki-datasource" + }, + "name": "loki", + "options": [], + "query": "loki", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "text": "tempo-datasource", + "value": "tempo-datasource" + }, + "name": "tempo", + "options": [], + "query": "tempo", + "refresh": 1, + "regex": ".*tempo.*", + "type": "datasource" + }, + { + "current": { + "text": "demo-cluster", + "value": "demo-cluster" + }, + "datasource": { + "type": "prometheus", + "uid": "${prom}" + }, + "name": "cluster", + "options": [], + "query": "label_values(app_worker_threads_active,cluster)", + "refresh": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Span Zero Demo Dashboard", + "uid": "span-zero-demo-dashboard", + "weekStart": "" +} \ No newline at end of file diff --git a/apps/folder/go.mod b/apps/folder/go.mod index 00d68a71378..6f354ef72ab 100644 --- a/apps/folder/go.mod +++ b/apps/folder/go.mod @@ -55,7 +55,7 @@ require ( golang.org/x/term v0.35.0 // indirect golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.13.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/client-go v0.34.1 // indirect diff --git a/apps/folder/go.sum b/apps/folder/go.sum index 597a46f3131..d3473aa6602 100644 --- a/apps/folder/go.sum +++ b/apps/folder/go.sum @@ -152,8 +152,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/folder/pkg/apis/folder_manifest.go b/apps/folder/pkg/apis/folder_manifest.go index 37e994e62c6..4fcb76837ed 100644 --- a/apps/folder/pkg/apis/folder_manifest.go +++ b/apps/folder/pkg/apis/folder_manifest.go @@ -18,8 +18,9 @@ import ( ) var appManifestData = app.ManifestData{ - AppName: "folder", - Group: "folder.grafana.app", + AppName: "folder", + Group: "folder.grafana.app", + PreferredVersion: "v1beta1", Versions: []app.ManifestVersion{ { Name: "v1beta1", diff --git a/apps/iam/Makefile b/apps/iam/Makefile index 71b00aca821..c11c35f7a90 100644 --- a/apps/iam/Makefile +++ b/apps/iam/Makefile @@ -8,6 +8,7 @@ generate: install-app-sdk update-app-sdk ## Run Grafana App SDK code generation --grouping=group \ --defencoding=none \ --noschemasinmanifest \ + --genoperatorstate=false \ --postprocess .PHONY: deps diff --git a/apps/iam/Tiltfile b/apps/iam/Tiltfile index ab7c530e4b6..fa3c78faaf7 100644 --- a/apps/iam/Tiltfile +++ b/apps/iam/Tiltfile @@ -3,6 +3,17 @@ # https://docs.tilt.dev/api.html#api.version_settings version_settings(constraint='>=0.22.2') +custom_build( + 'iam-folder-reconciler', + command='docker buildx build --tag 
$EXPECTED_REF -f ./apps/iam/local/Dockerfile .', + deps=[ + 'apps/iam', + 'pkg', + ], + disable_push=True, + dir='../..', +) + k8s_yaml([filename for filename in listdir('local/yamls') if filename.lower().endswith(('.yaml', '.yml'))]) # Port forward Grafana to localhost:3000 diff --git a/apps/iam/cmd/operator/Dockerfile b/apps/iam/cmd/operator/Dockerfile deleted file mode 100644 index 513adce4f24..00000000000 --- a/apps/iam/cmd/operator/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM golang:1.24-alpine AS builder - -WORKDIR /build -COPY go.mod go.sum ./ -COPY vendor* ./vendor -RUN test -f vendor/modules.txt || go mod download - -COPY cmd cmd -COPY pkg pkg - -RUN go build -o "target/operator" cmd/operator/*.go - -FROM alpine AS runtime -COPY --from=builder /build/target/operator /usr/bin/operator - -ENTRYPOINT ["/usr/bin/operator"] \ No newline at end of file diff --git a/apps/iam/cmd/operator/authrt.go b/apps/iam/cmd/operator/authrt.go deleted file mode 100644 index 55d1c4cbecc..00000000000 --- a/apps/iam/cmd/operator/authrt.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - - utilnet "k8s.io/apimachinery/pkg/util/net" - - "github.com/grafana/authlib/authn" -) - -type authRoundTripper struct { - tokenExchangeClient *authn.TokenExchangeClient - transport http.RoundTripper -} - -func (t *authRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - tokenResponse, err := t.tokenExchangeClient.Exchange(req.Context(), authn.TokenExchangeRequest{ - Audiences: []string{"folder.grafana.app"}, - Namespace: "*", - }) - if err != nil { - return nil, fmt.Errorf("failed to exchange token: %w", err) - } - - // clone the request as RTs are not expected to mutate the passed request - req = utilnet.CloneRequest(req) - - req.Header.Set("X-Access-Token", "Bearer "+tokenResponse.Token) - return t.transport.RoundTrip(req) -} diff --git a/apps/iam/cmd/operator/config.go b/apps/iam/cmd/operator/config.go deleted file mode 100644 index 
f5481dfb9bd..00000000000 --- a/apps/iam/cmd/operator/config.go +++ /dev/null @@ -1,131 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/grafana/grafana-app-sdk/plugin/kubeconfig" - "github.com/grafana/grafana-app-sdk/simple" - "github.com/grafana/grafana/pkg/services/authz" -) - -const ( - ConnTypeGRPC = "grpc" - ConnTypeHTTP = "http" -) - -type Config struct { - OTelConfig simple.OpenTelemetryConfig - WebhookServer WebhookServerConfig - KubeConfig *kubeconfig.NamespacedConfig - ZanzanaClient authz.ZanzanaClientConfig - FolderReconciler FolderReconcilerConfig -} - -type WebhookServerConfig struct { - Port int - TLSCertPath string - TLSKeyPath string -} - -type FolderReconcilerConfig struct { - Namespace string - MaxConcurrentWorkers uint64 -} - -func LoadConfigFromEnv() (*Config, error) { - cfg := Config{} - cfg.OTelConfig.ServiceName = os.Getenv("OTEL_SERVICE_NAME") - switch strings.ToLower(os.Getenv("OTEL_CONN_TYPE")) { - case ConnTypeGRPC: - cfg.OTelConfig.ConnType = ConnTypeGRPC - case ConnTypeHTTP: - cfg.OTelConfig.ConnType = ConnTypeHTTP - case "": - // Default - cfg.OTelConfig.ConnType = ConnTypeHTTP - default: - return nil, fmt.Errorf("unknown OTEL_CONN_TYPE '%s'", os.Getenv("OTEL_CONN_TYPE")) - } - cfg.OTelConfig.Host = os.Getenv("OTEL_HOST") - portStr := os.Getenv("OTEL_PORT") - if portStr == "" { - if cfg.OTelConfig.ConnType == ConnTypeGRPC { - // Default OTel GRPC port - cfg.OTelConfig.Port = 4317 - } else { - // Default OTel HTTP port - cfg.OTelConfig.Port = 4318 - } - } else { - var err error - cfg.OTelConfig.Port, err = strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("invalid OTEL_PORT '%s': %w", portStr, err) - } - } - - whPortStr := os.Getenv("WEBHOOK_PORT") - if whPortStr == "" { - cfg.WebhookServer.Port = 8443 - } else { - var err error - cfg.WebhookServer.Port, err = strconv.Atoi(whPortStr) - if err != nil { - return nil, fmt.Errorf("invalid WEBHOOK_PORT '%s': %w", whPortStr, err) - } - } 
- - cfg.WebhookServer.TLSCertPath = os.Getenv("WEBHOOK_CERT_PATH") - cfg.WebhookServer.TLSKeyPath = os.Getenv("WEBHOOK_KEY_PATH") - - // Load the kube config - kubeConfigFile := os.Getenv("KUBE_CONFIG_FILE") - if kubeConfigFile != "" { - kubeConfig, err := LoadKubeConfigFromFile(kubeConfigFile) - if err != nil { - return nil, fmt.Errorf("unable to load kubernetes configuration from file '%s': %w", kubeConfigFile, err) - } - cfg.KubeConfig = kubeConfig - } else if folderAppURL := os.Getenv("FOLDER_APP_URL"); folderAppURL != "" { - exchangeUrl := os.Getenv("TOKEN_EXCHANGE_URL") - authToken := os.Getenv("AUTH_TOKEN") - namespace := os.Getenv("FOLDER_APP_NAMESPACE") - if exchangeUrl == "" || authToken == "" { - return nil, fmt.Errorf("TOKEN_EXCHANGE_URL and AUTH_TOKEN must be set when FOLDER_APP_URL is set") - } - - kubeConfig, err := LoadKubeConfigFromFolderAppURL(folderAppURL, exchangeUrl, authToken, namespace) - if err != nil { - return nil, fmt.Errorf("unable to load kubernetes configuration from folder app URL '%s': %w", folderAppURL, err) - } - cfg.KubeConfig = kubeConfig - } else { - kubeConfig, err := LoadInClusterConfig() - if err != nil { - return nil, fmt.Errorf("unable to load in-cluster kubernetes configuration: %w", err) - } - cfg.KubeConfig = kubeConfig - } - - cfg.ZanzanaClient.URL = os.Getenv("ZANZANA_ADDR") - cfg.ZanzanaClient.Token = os.Getenv("ZANZANA_TOKEN") - cfg.ZanzanaClient.TokenExchangeURL = os.Getenv("TOKEN_EXCHANGE_URL") - cfg.ZanzanaClient.ServerCertFile = os.Getenv("ZANZANA_SERVER_CERT_FILE") - - cfg.FolderReconciler.Namespace = os.Getenv("FOLDER_RECONCILER_NAMESPACE") - maxConcurrentWorkersStr := os.Getenv("FOLDER_RECONCILER_MAX_CONCURRENT_WORKERS") - if maxConcurrentWorkersStr == "" { - cfg.FolderReconciler.MaxConcurrentWorkers = 20 - } else { - maxConcurrentWorkers, err := strconv.ParseUint(maxConcurrentWorkersStr, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid FOLDER_RECONCILER_MAX_CONCURRENT_WORKERS '%s': %w", 
maxConcurrentWorkersStr, err) - } - cfg.FolderReconciler.MaxConcurrentWorkers = maxConcurrentWorkers - } - - return &cfg, nil -} diff --git a/apps/iam/cmd/operator/kubeconfig.go b/apps/iam/cmd/operator/kubeconfig.go deleted file mode 100644 index b704979cd9a..00000000000 --- a/apps/iam/cmd/operator/kubeconfig.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/transport" - - "github.com/grafana/authlib/authn" - "github.com/grafana/grafana-app-sdk/plugin/kubeconfig" -) - -// LoadInClusterConfig loads a kubernetes in-cluster config. -// Since the in-cluster config doesn't have a namespace, it defaults to "default" -func LoadInClusterConfig() (*kubeconfig.NamespacedConfig, error) { - cfg, err := rest.InClusterConfig() - if err != nil { - return nil, err - } - cfg.APIPath = "/apis" - return &kubeconfig.NamespacedConfig{ - RestConfig: *cfg, - Namespace: "default", - }, nil -} - -// LoadKubeConfigFromEnv loads a NamespacedConfig from the value of an environment variable -func LoadKubeConfigFromFolderAppURL(folderAppURL, exchangeUrl, authToken, namespace string) (*kubeconfig.NamespacedConfig, error) { - tokenExchangeClient, err := authn.NewTokenExchangeClient(authn.TokenExchangeConfig{ - TokenExchangeURL: exchangeUrl, - Token: authToken, - }) - if err != nil { - return nil, fmt.Errorf("failed to create token exchange client: %w", err) - } - - return &kubeconfig.NamespacedConfig{ - RestConfig: rest.Config{ - APIPath: "/apis", - Host: folderAppURL, - WrapTransport: transport.WrapperFunc(func(rt http.RoundTripper) http.RoundTripper { - return &authRoundTripper{ - tokenExchangeClient: tokenExchangeClient, - transport: rt, - } - }), - TLSClientConfig: rest.TLSClientConfig{ - Insecure: true, - }, - }, - Namespace: namespace, - }, nil -} - -// LoadKubeConfigFromFile loads a NamespacedConfig from a file on-disk (such as a mounted secret) -func 
LoadKubeConfigFromFile(configPath string) (*kubeconfig.NamespacedConfig, error) { - // Load the kubeconfig file - config, err := clientcmd.LoadFromFile(configPath) - if err != nil { - return nil, fmt.Errorf("failed to load kubeconfig from %s: %w", configPath, err) - } - - // Build the REST config from the kubeconfig - restConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig() - if err != nil { - return nil, fmt.Errorf("failed to create REST config: %w", err) - } - - // Get the namespace from the current context, default to "default" if not set - namespace := "default" - if config.CurrentContext != "" { - if context, exists := config.Contexts[config.CurrentContext]; exists && context.Namespace != "" { - namespace = context.Namespace - } - } - - restConfig.APIPath = "/apis" - - return &kubeconfig.NamespacedConfig{ - RestConfig: *restConfig, - Namespace: namespace, - }, nil -} diff --git a/apps/iam/cmd/operator/main.go b/apps/iam/cmd/operator/main.go deleted file mode 100644 index 2824de11edd..00000000000 --- a/apps/iam/cmd/operator/main.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "context" - "log/slog" - "os" - "os/signal" - - "github.com/grafana/grafana-app-sdk/k8s" - "github.com/grafana/grafana-app-sdk/logging" - "github.com/grafana/grafana-app-sdk/operator" - "github.com/grafana/grafana-app-sdk/simple" - "github.com/grafana/grafana/apps/iam/pkg/app" - "github.com/prometheus/client_golang/prometheus" -) - -func main() { - // Configure the default logger to use slog - logging.DefaultLogger = logging.NewSLogLogger(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ - Level: slog.LevelDebug, - })) - - //Load the config from the environment - cfg, err := LoadConfigFromEnv() - if err != nil { - logging.DefaultLogger.With("error", err).Error("Unable to load config from environment") - panic(err) - } - - // Set up tracing - if cfg.OTelConfig.Host != "" { - err = 
simple.SetTraceProvider(simple.OpenTelemetryConfig{ - Host: cfg.OTelConfig.Host, - Port: cfg.OTelConfig.Port, - ConnType: cfg.OTelConfig.ConnType, - ServiceName: cfg.OTelConfig.ServiceName, - }) - if err != nil { - logging.DefaultLogger.With("error", err).Error("Unable to set trace provider") - panic(err) - } - } - - // Create the operator config and the runner - operatorConfig := operator.RunnerConfig{ - KubeConfig: cfg.KubeConfig.RestConfig, - WebhookConfig: operator.RunnerWebhookConfig{ - Port: cfg.WebhookServer.Port, - TLSConfig: k8s.TLSConfig{ - CertPath: cfg.WebhookServer.TLSCertPath, - KeyPath: cfg.WebhookServer.TLSKeyPath, - }, - }, - MetricsConfig: operator.RunnerMetricsConfig{ - Enabled: true, - }, - } - - runner, err := operator.NewRunner(operatorConfig) - if err != nil { - logging.DefaultLogger.With("error", err).Error("Unable to create operator runner") - panic(err) - } - - // Context and cancel for the operator's Run method - ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill) - defer cancel() - - // Create app config from operator config - appCfg := app.AppConfig{ - ZanzanaClientCfg: cfg.ZanzanaClient, - Namespace: cfg.FolderReconciler.Namespace, - InformerConfig: app.InformerConfig{ - MaxConcurrentWorkers: cfg.FolderReconciler.MaxConcurrentWorkers, - }, - MetricsRegisterer: prometheus.DefaultRegisterer, - } - - // Run - logging.DefaultLogger.Info("Starting operator") - err = runner.Run(ctx, app.Provider(appCfg)) - if err != nil { - logging.DefaultLogger.With("error", err).Error("Operator exited with error") - panic(err) - } - logging.DefaultLogger.Info("Normal operator exit") -} diff --git a/apps/iam/go.mod b/apps/iam/go.mod index fc5e4687f64..b196d540f3f 100644 --- a/apps/iam/go.mod +++ b/apps/iam/go.mod @@ -21,19 +21,16 @@ replace github.com/grafana/grafana/pkg/aggregator => ../../pkg/aggregator replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager 
v0.25.1-0.20250911094103-5456b6e45604 require ( - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c - github.com/grafana/grafana v6.1.6+incompatible + github.com/grafana/grafana v0.0.0-00010101000000-000000000000 github.com/grafana/grafana-app-sdk v0.46.0 github.com/grafana/grafana-app-sdk/logging v0.45.0 - github.com/grafana/grafana-app-sdk/plugin v0.45.0 github.com/grafana/grafana/apps/folder v0.0.0 github.com/grafana/grafana/pkg/apimachinery v0.0.0 github.com/prometheus/client_golang v1.23.2 go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 k8s.io/apimachinery v0.34.1 - k8s.io/client-go v0.34.1 - k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b ) require ( @@ -204,8 +201,9 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d // indirect - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // indirect + github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect github.com/grafana/dataplane/sdata v0.0.9 // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/grafana-aws-sdk v1.2.0 // indirect @@ -424,7 +422,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/evanphx/json-patch.v4 
v4.12.0 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect @@ -439,6 +437,7 @@ require ( k8s.io/api v0.34.1 // indirect k8s.io/apiextensions-apiserver v0.34.1 // indirect k8s.io/apiserver v0.34.1 // indirect + k8s.io/client-go v0.34.1 // indirect k8s.io/component-base v0.34.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kms v0.34.1 // indirect diff --git a/apps/iam/go.sum b/apps/iam/go.sum index f599d191613..5ccc841e019 100644 --- a/apps/iam/go.sum +++ b/apps/iam/go.sum @@ -721,12 +721,12 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d h1:zzEty7HgfXbQ/RiBCJFMqaZiJlqiXuz/Zbc6/H6ksuM= -github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d/go.mod h1:T5sitas9VhVj8/S9LeRLy6H75kTBdh/sCCqHo7gaQI8= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 h1:wfehM99Xlpltl9MQx8SITkgFgHmPGqrXoBCVLk/Q6NA= +github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib 
v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA= github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg= github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= @@ -737,8 +737,6 @@ github.com/grafana/grafana-app-sdk v0.46.0 h1:gvzQvCQgZJ/73BfAcbDt/6TAMhnVikVPxZ github.com/grafana/grafana-app-sdk v0.46.0/go.mod h1:LCTrqR1SwBS13XGVYveBmM7giJDDjzuXK+M9VzPuPWc= github.com/grafana/grafana-app-sdk/logging v0.45.0 h1:0SH6nYZpiLBZRwUq4J6+1vo8xuHKJjnO95/2pGOoA8w= github.com/grafana/grafana-app-sdk/logging v0.45.0/go.mod h1:Gh/nBWnspK3oDNWtiM5qUF/fardHzOIEez+SPI3JeHA= -github.com/grafana/grafana-app-sdk/plugin v0.45.0 h1:rK4FL5h7SqGBDeUdrUjHVGnwgN6w8deQAKx8gJ27Iew= -github.com/grafana/grafana-app-sdk/plugin v0.45.0/go.mod h1:fZ6lWVMWr0EpkmyocxZ7MTTc9x6b2jYM93+gjlMgVD4= github.com/grafana/grafana-aws-sdk v1.2.0 h1:LLR4/g91WBuCRwm2cbWfCREq565+GxIFe08nqqIcIuw= github.com/grafana/grafana-aws-sdk v1.2.0/go.mod h1:bBo7qOmM3f61vO+2JxTolNUph1l2TmtzmWcU9/Im+8A= github.com/grafana/grafana-azure-sdk-go/v2 v2.2.0 h1:0TYrkzAc3u0HX+9GK86cGrLTUAcmQfl3/LEB3tL+SOA= @@ -1996,8 +1994,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf 
v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/apps/iam/kinds/v0alpha1/teambindingspec.cue b/apps/iam/kinds/v0alpha1/teambindingspec.cue index 3bd130c902d..c20e592e458 100644 --- a/apps/iam/kinds/v0alpha1/teambindingspec.cue +++ b/apps/iam/kinds/v0alpha1/teambindingspec.cue @@ -4,12 +4,13 @@ TeamBindingSpec: { #Subject: { // uid of the identity name: string - // permission of the identity in the team - permission: TeamPermission } - subjects: [...#Subject] + subject: #Subject teamRef: TeamRef + + // permission of the identity in the team + permission: TeamPermission } TeamRef:{ diff --git a/apps/iam/local/Dockerfile b/apps/iam/local/Dockerfile new file mode 100644 index 00000000000..28df71c2384 --- /dev/null +++ b/apps/iam/local/Dockerfile @@ -0,0 +1,63 @@ +# Build stage +FROM golang:1.24.6-alpine AS builder + +# Set working directory +WORKDIR /app + +# Install bash (required for update-workspace.sh) and build tools (required for CGO) +RUN apk add --no-cache bash build-base + +# Copy source code +COPY ./apps ./apps +COPY ./pkg ./pkg +COPY ./conf ./conf +COPY ./go.mod ./go.mod +COPY ./go.sum ./go.sum +COPY ./go.work ./go.work +COPY ./go.work.sum ./go.work.sum +COPY ./build.go ./build.go +COPY ./package.json ./package.json + +# Update workspace +COPY scripts/go-workspace/update-workspace.sh ./scripts/go-workspace/update-workspace.sh +RUN bash ./scripts/go-workspace/update-workspace.sh + +# Build the application in dev mode to 
output binaries directly to ./bin/ +RUN go run build.go -dev build-backend + +# Final stage +FROM alpine:latest + +# Install ca-certificates for HTTPS requests and wget for health checks +RUN apk --no-cache add ca-certificates tzdata wget + +# Create non-root user +RUN addgroup -g 1001 -S appgroup && \ + adduser -u 1001 -S appuser -G appgroup + +# Set working directory +WORKDIR /usr/share/grafana + +# Copy all built binaries and conf from builder stage +COPY --from=builder /app/bin/ ./bin/ +COPY --from=builder /app/conf/ ./conf/ + +# Create necessary directories and add binaries to PATH +RUN mkdir -p /etc/grafana-config && \ + chown -R appuser:appgroup /usr/share/grafana /etc/grafana-config + +# Switch to non-root user +USER appuser + +# Add binaries to PATH +ENV PATH="/usr/share/grafana/bin:${PATH}" + +# Expose ports for metrics and profiling +EXPOSE 8080 6060 + +# Health check +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD wget --no-verbose --tries=1 --spider http://localhost:8080/metrics || exit 1 + +# Run the application with the command from the YAML (can be overridden) +CMD ["grafana", "server", "target", "--config=/etc/grafana-config/operator.ini", "--homepath=/usr/share/grafana"] diff --git a/apps/iam/local/yamls/operator.yaml b/apps/iam/local/yamls/operator.yaml index 574b9ad5b7d..6e17d5d670b 100644 --- a/apps/iam/local/yamls/operator.yaml +++ b/apps/iam/local/yamls/operator.yaml @@ -56,7 +56,8 @@ spec: spec: containers: - command: - - grafana-server + - grafana + - server - target - --config=/etc/grafana-config/operator.ini - --homepath=/usr/share/grafana @@ -78,7 +79,7 @@ spec: secretKeyRef: name: iam-operator-secrets key: grpc_auth_token - image: grafana/grafana-dev:12.3.0-17863745596 + image: iam-folder-reconciler imagePullPolicy: IfNotPresent name: iam-folder-reconciler volumeMounts: diff --git a/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go index 
ae553035b02..371c97fdf4b 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/corerole_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type CoreRoleClient struct { @@ -76,24 +75,6 @@ func (c *CoreRoleClient) Patch(ctx context.Context, identifier resource.Identifi return c.client.Patch(ctx, identifier, req, opts) } -func (c *CoreRoleClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus CoreRoleStatus, opts resource.UpdateOptions) (*CoreRole, error) { - return c.client.Update(ctx, &CoreRole{ - TypeMeta: metav1.TypeMeta{ - Kind: CoreRoleKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *CoreRoleClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go index 4aba32da755..625cced10b9 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/corerole_object_gen.go @@ -21,8 +21,6 @@ type CoreRole struct { // Spec is the spec of the CoreRole Spec CoreRoleSpec `json:"spec" yaml:"spec"` - - Status CoreRoleStatus `json:"status" yaml:"status"` } func (o *CoreRole) GetSpec() any { @@ -39,15 +37,11 @@ func (o *CoreRole) SetSpec(spec any) error { } func (o *CoreRole) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *CoreRole) GetSubresource(name string) (any, bool) { switch name { - case 
"status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *CoreRole) GetSubresource(name string) (any, bool) { func (o *CoreRole) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(CoreRoleStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type CoreRoleStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *CoreRole) DeepCopyInto(dst *CoreRole) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *CoreRoleSpec) DeepCopy() *CoreRoleSpec { func (s *CoreRoleSpec) DeepCopyInto(dst *CoreRoleSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of CoreRoleStatus -func (s *CoreRoleStatus) DeepCopy() *CoreRoleStatus { - cpy := &CoreRoleStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies CoreRoleStatus into another CoreRoleStatus object -func (s *CoreRoleStatus) DeepCopyInto(dst *CoreRoleStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go index 71db46f40fa..1a0133a4798 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type GlobalRoleClient struct { @@ -76,24 +75,6 @@ func (c *GlobalRoleClient) Patch(ctx context.Context, identifier resource.Identi return c.client.Patch(ctx, identifier, req, opts) } -func (c *GlobalRoleClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus GlobalRoleStatus, opts 
resource.UpdateOptions) (*GlobalRole, error) { - return c.client.Update(ctx, &GlobalRole{ - TypeMeta: metav1.TypeMeta{ - Kind: GlobalRoleKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *GlobalRoleClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go index 2081adb8507..27165fe70bd 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrole_object_gen.go @@ -21,8 +21,6 @@ type GlobalRole struct { // Spec is the spec of the GlobalRole Spec GlobalRoleSpec `json:"spec" yaml:"spec"` - - Status GlobalRoleStatus `json:"status" yaml:"status"` } func (o *GlobalRole) GetSpec() any { @@ -39,15 +37,11 @@ func (o *GlobalRole) SetSpec(spec any) error { } func (o *GlobalRole) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *GlobalRole) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *GlobalRole) GetSubresource(name string) (any, bool) { func (o *GlobalRole) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(GlobalRoleStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type GlobalRoleStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *GlobalRole) DeepCopyInto(dst *GlobalRole) { 
dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *GlobalRoleSpec) DeepCopy() *GlobalRoleSpec { func (s *GlobalRoleSpec) DeepCopyInto(dst *GlobalRoleSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of GlobalRoleStatus -func (s *GlobalRoleStatus) DeepCopy() *GlobalRoleStatus { - cpy := &GlobalRoleStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies GlobalRoleStatus into another GlobalRoleStatus object -func (s *GlobalRoleStatus) DeepCopyInto(dst *GlobalRoleStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go index 66ea08e4d69..9c06311b938 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type GlobalRoleBindingClient struct { @@ -76,24 +75,6 @@ func (c *GlobalRoleBindingClient) Patch(ctx context.Context, identifier resource return c.client.Patch(ctx, identifier, req, opts) } -func (c *GlobalRoleBindingClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus GlobalRoleBindingStatus, opts resource.UpdateOptions) (*GlobalRoleBinding, error) { - return c.client.Update(ctx, &GlobalRoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: GlobalRoleBindingKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - 
func (c *GlobalRoleBindingClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go index 657f1830ff1..3bd4609d25d 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/globalrolebinding_object_gen.go @@ -21,8 +21,6 @@ type GlobalRoleBinding struct { // Spec is the spec of the GlobalRoleBinding Spec GlobalRoleBindingSpec `json:"spec" yaml:"spec"` - - Status GlobalRoleBindingStatus `json:"status" yaml:"status"` } func (o *GlobalRoleBinding) GetSpec() any { @@ -39,15 +37,11 @@ func (o *GlobalRoleBinding) SetSpec(spec any) error { } func (o *GlobalRoleBinding) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *GlobalRoleBinding) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *GlobalRoleBinding) GetSubresource(name string) (any, bool) { func (o *GlobalRoleBinding) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(GlobalRoleBindingStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type GlobalRoleBindingStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *GlobalRoleBinding) DeepCopyInto(dst *GlobalRoleBinding) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *GlobalRoleBindingSpec) DeepCopy() *GlobalRoleBindingSpec { func (s *GlobalRoleBindingSpec) DeepCopyInto(dst 
*GlobalRoleBindingSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of GlobalRoleBindingStatus -func (s *GlobalRoleBindingStatus) DeepCopy() *GlobalRoleBindingStatus { - cpy := &GlobalRoleBindingStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies GlobalRoleBindingStatus into another GlobalRoleBindingStatus object -func (s *GlobalRoleBindingStatus) DeepCopyInto(dst *GlobalRoleBindingStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go index 05c70845e26..dab46ffdc05 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ResourcePermissionClient struct { @@ -76,24 +75,6 @@ func (c *ResourcePermissionClient) Patch(ctx context.Context, identifier resourc return c.client.Patch(ctx, identifier, req, opts) } -func (c *ResourcePermissionClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus ResourcePermissionStatus, opts resource.UpdateOptions) (*ResourcePermission, error) { - return c.client.Update(ctx, &ResourcePermission{ - TypeMeta: metav1.TypeMeta{ - Kind: ResourcePermissionKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *ResourcePermissionClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git 
a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go index 4fcebde9e01..996beb7e002 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/resourcepermission_object_gen.go @@ -21,8 +21,6 @@ type ResourcePermission struct { // Spec is the spec of the ResourcePermission Spec ResourcePermissionSpec `json:"spec" yaml:"spec"` - - Status ResourcePermissionStatus `json:"status" yaml:"status"` } func (o *ResourcePermission) GetSpec() any { @@ -39,15 +37,11 @@ func (o *ResourcePermission) SetSpec(spec any) error { } func (o *ResourcePermission) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *ResourcePermission) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *ResourcePermission) GetSubresource(name string) (any, bool) { func (o *ResourcePermission) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(ResourcePermissionStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type ResourcePermissionStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *ResourcePermission) DeepCopyInto(dst *ResourcePermission) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *ResourcePermissionSpec) DeepCopy() *ResourcePermissionSpec { func (s *ResourcePermissionSpec) DeepCopyInto(dst *ResourcePermissionSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of ResourcePermissionStatus -func (s *ResourcePermissionStatus) 
DeepCopy() *ResourcePermissionStatus { - cpy := &ResourcePermissionStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies ResourcePermissionStatus into another ResourcePermissionStatus object -func (s *ResourcePermissionStatus) DeepCopyInto(dst *ResourcePermissionStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go index e61d3fc83dd..b53bfdd9e6d 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/role_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type RoleClient struct { @@ -76,24 +75,6 @@ func (c *RoleClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *RoleClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus RoleStatus, opts resource.UpdateOptions) (*Role, error) { - return c.client.Update(ctx, &Role{ - TypeMeta: metav1.TypeMeta{ - Kind: RoleKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *RoleClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go index 9256a618c9f..20bb587157e 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/role_object_gen.go @@ -21,8 +21,6 @@ type Role struct { // Spec is the spec of the Role Spec RoleSpec `json:"spec" 
yaml:"spec"` - - Status RoleStatus `json:"status" yaml:"status"` } func (o *Role) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Role) SetSpec(spec any) error { } func (o *Role) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Role) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Role) GetSubresource(name string) (any, bool) { func (o *Role) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(RoleStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type RoleStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Role) DeepCopyInto(dst *Role) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *RoleSpec) DeepCopy() *RoleSpec { func (s *RoleSpec) DeepCopyInto(dst *RoleSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of RoleStatus -func (s *RoleStatus) DeepCopy() *RoleStatus { - cpy := &RoleStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies RoleStatus into another RoleStatus object -func (s *RoleStatus) DeepCopyInto(dst *RoleStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go index 13e78c70adf..96807b1b795 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type 
RoleBindingClient struct { @@ -76,24 +75,6 @@ func (c *RoleBindingClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *RoleBindingClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus RoleBindingStatus, opts resource.UpdateOptions) (*RoleBinding, error) { - return c.client.Update(ctx, &RoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: RoleBindingKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *RoleBindingClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go index bec75890d89..dfd7741e05a 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/rolebinding_object_gen.go @@ -21,8 +21,6 @@ type RoleBinding struct { // Spec is the spec of the RoleBinding Spec RoleBindingSpec `json:"spec" yaml:"spec"` - - Status RoleBindingStatus `json:"status" yaml:"status"` } func (o *RoleBinding) GetSpec() any { @@ -39,15 +37,11 @@ func (o *RoleBinding) SetSpec(spec any) error { } func (o *RoleBinding) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *RoleBinding) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *RoleBinding) GetSubresource(name string) (any, bool) { func (o *RoleBinding) SetSubresource(name string, value any) error { switch name { - case 
"status": - cast, ok := value.(RoleBindingStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type RoleBindingStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *RoleBinding) DeepCopyInto(dst *RoleBinding) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *RoleBindingSpec) DeepCopy() *RoleBindingSpec { func (s *RoleBindingSpec) DeepCopyInto(dst *RoleBindingSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of RoleBindingStatus -func (s *RoleBindingStatus) DeepCopy() *RoleBindingStatus { - cpy := &RoleBindingStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies RoleBindingStatus into another RoleBindingStatus object -func (s *RoleBindingStatus) DeepCopyInto(dst *RoleBindingStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go index 3b922e8b313..0ce1616daa3 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ServiceAccountClient struct { @@ -76,24 +75,6 @@ func (c *ServiceAccountClient) Patch(ctx context.Context, identifier resource.Id return c.client.Patch(ctx, identifier, req, opts) } -func (c *ServiceAccountClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus ServiceAccountStatus, opts resource.UpdateOptions) (*ServiceAccount, error) { - return c.client.Update(ctx, &ServiceAccount{ - TypeMeta: metav1.TypeMeta{ - Kind: ServiceAccountKind().Kind(), 
- APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *ServiceAccountClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go index 081e244dd6e..fe3cd7f3609 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/serviceaccount_object_gen.go @@ -21,8 +21,6 @@ type ServiceAccount struct { // Spec is the spec of the ServiceAccount Spec ServiceAccountSpec `json:"spec" yaml:"spec"` - - Status ServiceAccountStatus `json:"status" yaml:"status"` } func (o *ServiceAccount) GetSpec() any { @@ -39,15 +37,11 @@ func (o *ServiceAccount) SetSpec(spec any) error { } func (o *ServiceAccount) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *ServiceAccount) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *ServiceAccount) GetSubresource(name string) (any, bool) { func (o *ServiceAccount) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(ServiceAccountStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type ServiceAccountStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *ServiceAccount) DeepCopyInto(dst *ServiceAccount) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) 
o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *ServiceAccountSpec) DeepCopy() *ServiceAccountSpec { func (s *ServiceAccountSpec) DeepCopyInto(dst *ServiceAccountSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of ServiceAccountStatus -func (s *ServiceAccountStatus) DeepCopy() *ServiceAccountStatus { - cpy := &ServiceAccountStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies ServiceAccountStatus into another ServiceAccountStatus object -func (s *ServiceAccountStatus) DeepCopyInto(dst *ServiceAccountStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go index c166c6c2881..807c68dc0f6 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/team_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type TeamClient struct { @@ -76,24 +75,6 @@ func (c *TeamClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *TeamClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus TeamStatus, opts resource.UpdateOptions) (*Team, error) { - return c.client.Update(ctx, &Team{ - TypeMeta: metav1.TypeMeta{ - Kind: TeamKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *TeamClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, 
opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go index bfc949acfd1..4030bebb9d1 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/team_object_gen.go @@ -21,8 +21,6 @@ type Team struct { // Spec is the spec of the Team Spec TeamSpec `json:"spec" yaml:"spec"` - - Status TeamStatus `json:"status" yaml:"status"` } func (o *Team) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Team) SetSpec(spec any) error { } func (o *Team) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Team) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Team) GetSubresource(name string) (any, bool) { func (o *Team) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(TeamStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type TeamStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Team) DeepCopyInto(dst *Team) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *TeamSpec) DeepCopy() *TeamSpec { func (s *TeamSpec) DeepCopyInto(dst *TeamSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of TeamStatus -func (s *TeamStatus) DeepCopy() *TeamStatus { - cpy := &TeamStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies TeamStatus into another TeamStatus object -func (s *TeamStatus) DeepCopyInto(dst *TeamStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go 
b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go index 39729e1ece1..faaf86c1b00 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type TeamBindingClient struct { @@ -76,24 +75,6 @@ func (c *TeamBindingClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *TeamBindingClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus TeamBindingStatus, opts resource.UpdateOptions) (*TeamBinding, error) { - return c.client.Update(ctx, &TeamBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: TeamBindingKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *TeamBindingClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go index 6c9f6f407e3..a958c55f5e7 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_object_gen.go @@ -21,8 +21,6 @@ type TeamBinding struct { // Spec is the spec of the TeamBinding Spec TeamBindingSpec `json:"spec" yaml:"spec"` - - Status TeamBindingStatus `json:"status" yaml:"status"` } func (o *TeamBinding) GetSpec() any { @@ -39,15 +37,11 @@ func (o *TeamBinding) SetSpec(spec any) error { } func (o *TeamBinding) GetSubresources() map[string]any { - return map[string]any{ - "status": 
o.Status, - } + return map[string]any{} } func (o *TeamBinding) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *TeamBinding) GetSubresource(name string) (any, bool) { func (o *TeamBinding) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(TeamBindingStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type TeamBindingStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *TeamBinding) DeepCopyInto(dst *TeamBinding) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *TeamBindingSpec) DeepCopy() *TeamBindingSpec { func (s *TeamBindingSpec) DeepCopyInto(dst *TeamBindingSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of TeamBindingStatus -func (s *TeamBindingStatus) DeepCopy() *TeamBindingStatus { - cpy := &TeamBindingStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies TeamBindingStatus into another TeamBindingStatus object -func (s *TeamBindingStatus) DeepCopyInto(dst *TeamBindingStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_spec_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_spec_gen.go index 0e58323533e..13abd605168 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/teambinding_spec_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/teambinding_spec_gen.go @@ -6,8 +6,6 @@ package v0alpha1 type TeamBindingspecSubject struct { // uid of the identity Name string `json:"name"` - // permission of the identity in the team - Permission TeamBindingTeamPermission `json:"permission"` } // NewTeamBindingspecSubject creates a new 
TeamBindingspecSubject object. @@ -15,14 +13,6 @@ func NewTeamBindingspecSubject() *TeamBindingspecSubject { return &TeamBindingspecSubject{} } -// +k8s:openapi-gen=true -type TeamBindingTeamPermission string - -const ( - TeamBindingTeamPermissionAdmin TeamBindingTeamPermission = "admin" - TeamBindingTeamPermissionMember TeamBindingTeamPermission = "member" -) - // +k8s:openapi-gen=true type TeamBindingTeamRef struct { // Name is the unique identifier for a team. @@ -34,16 +24,26 @@ func NewTeamBindingTeamRef() *TeamBindingTeamRef { return &TeamBindingTeamRef{} } +// +k8s:openapi-gen=true +type TeamBindingTeamPermission string + +const ( + TeamBindingTeamPermissionAdmin TeamBindingTeamPermission = "admin" + TeamBindingTeamPermissionMember TeamBindingTeamPermission = "member" +) + // +k8s:openapi-gen=true type TeamBindingSpec struct { - Subjects []TeamBindingspecSubject `json:"subjects"` - TeamRef TeamBindingTeamRef `json:"teamRef"` + Subject TeamBindingspecSubject `json:"subject"` + TeamRef TeamBindingTeamRef `json:"teamRef"` + // permission of the identity in the team + Permission TeamBindingTeamPermission `json:"permission"` } // NewTeamBindingSpec creates a new TeamBindingSpec object. 
func NewTeamBindingSpec() *TeamBindingSpec { return &TeamBindingSpec{ - Subjects: []TeamBindingspecSubject{}, - TeamRef: *NewTeamBindingTeamRef(), + Subject: *NewTeamBindingspecSubject(), + TeamRef: *NewTeamBindingTeamRef(), } } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go index bd7af9b3361..665df84327e 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type UserClient struct { @@ -76,24 +75,6 @@ func (c *UserClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *UserClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus UserStatus, opts resource.UpdateOptions) (*User, error) { - return c.client.Update(ctx, &User{ - TypeMeta: metav1.TypeMeta{ - Kind: UserKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *UserClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go index ce2defa33a8..373112a1d87 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go @@ -21,8 +21,6 @@ type User struct { // Spec is the spec of the User Spec UserSpec `json:"spec" yaml:"spec"` - - Status UserStatus `json:"status" yaml:"status"` } func (o *User) GetSpec() any { @@ -39,15 
+37,11 @@ func (o *User) SetSpec(spec any) error { } func (o *User) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *User) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *User) GetSubresource(name string) (any, bool) { func (o *User) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(UserStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type UserStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *User) DeepCopyInto(dst *User) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *UserSpec) DeepCopy() *UserSpec { func (s *UserSpec) DeepCopyInto(dst *UserSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of UserStatus -func (s *UserStatus) DeepCopy() *UserStatus { - cpy := &UserStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies UserStatus into another UserStatus object -func (s *UserStatus) DeepCopyInto(dst *UserStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go b/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go index fb6892fa09d..2532b5df8cc 100644 --- a/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go +++ b/apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go @@ -109,18 +109,12 @@ func schema_pkg_apis_iam_v0alpha1_CoreRole(ref common.ReferenceCallback) common. 
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.CoreRoleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -387,18 +381,12 @@ func schema_pkg_apis_iam_v0alpha1_GlobalRole(ref common.ReferenceCallback) commo Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -435,18 +423,12 @@ func schema_pkg_apis_iam_v0alpha1_GlobalRoleBinding(ref common.ReferenceCallback Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingStatus"), - }, - }, }, - Required: 
[]string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -948,18 +930,12 @@ func schema_pkg_apis_iam_v0alpha1_ResourcePermission(ref common.ReferenceCallbac Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ResourcePermissionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -1247,18 +1223,12 @@ func schema_pkg_apis_iam_v0alpha1_Role(ref common.ReferenceCallback) common.Open Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleSpec", 
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -1295,18 +1265,12 @@ func schema_pkg_apis_iam_v0alpha1_RoleBinding(ref common.ReferenceCallback) comm Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.RoleBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -1808,18 +1772,12 @@ func schema_pkg_apis_iam_v0alpha1_ServiceAccount(ref common.ReferenceCallback) c Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.ServiceAccountSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -2040,18 +1998,12 
@@ func schema_pkg_apis_iam_v0alpha1_Team(ref common.ReferenceCallback) common.Open Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -2088,18 +2040,12 @@ func schema_pkg_apis_iam_v0alpha1_TeamBinding(ref common.ReferenceCallback) comm Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -2157,17 +2103,10 @@ func schema_pkg_apis_iam_v0alpha1_TeamBindingSpec(ref common.ReferenceCallback) SchemaProps: spec.SchemaProps{ Type: []string{"object"}, Properties: map[string]spec.Schema{ - "subjects": { + "subject": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - 
SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingspecSubject"), - }, - }, - }, + Default: map[string]interface{}{}, + Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingspecSubject"), }, }, "teamRef": { @@ -2176,8 +2115,16 @@ func schema_pkg_apis_iam_v0alpha1_TeamBindingSpec(ref common.ReferenceCallback) Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingTeamRef"), }, }, + "permission": { + SchemaProps: spec.SchemaProps{ + Description: "permission of the identity in the team", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"subjects", "teamRef"}, + Required: []string{"subject", "teamRef", "permission"}, }, }, Dependencies: []string{ @@ -2264,16 +2211,8 @@ func schema_pkg_apis_iam_v0alpha1_TeamBindingspecSubject(ref common.ReferenceCal Format: "", }, }, - "permission": { - SchemaProps: spec.SchemaProps{ - Description: "permission of the identity in the team", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, }, - Required: []string{"name", "permission"}, + Required: []string{"name"}, }, }, } @@ -2547,18 +2486,12 @@ func schema_pkg_apis_iam_v0alpha1_User(ref common.ReferenceCallback) common.Open Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", 
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } diff --git a/apps/iam/pkg/apis/iam_manifest.go b/apps/iam/pkg/apis/iam_manifest.go index fba76fbd7df..24aba9a1fd8 100644 --- a/apps/iam/pkg/apis/iam_manifest.go +++ b/apps/iam/pkg/apis/iam_manifest.go @@ -18,8 +18,9 @@ import ( ) var appManifestData = app.ManifestData{ - AppName: "iam", - Group: "iam.grafana.app", + AppName: "iam", + Group: "iam.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/iam/pkg/app/app.go b/apps/iam/pkg/app/app.go index 1f433ab0c45..05216220d7e 100644 --- a/apps/iam/pkg/app/app.go +++ b/apps/iam/pkg/app/app.go @@ -77,7 +77,6 @@ func New(cfg app.Config) (app.App, error) { folderReconciler, err := reconcilers.NewFolderReconciler(reconcilers.ReconcilerConfig{ ZanzanaCfg: appSpecificConfig.ZanzanaClientCfg, - KubeConfig: &cfg.KubeConfig, Metrics: metrics, }) if err != nil { diff --git a/apps/iam/pkg/reconcilers/folder_reconciler.go b/apps/iam/pkg/reconcilers/folder_reconciler.go index f06b4b2e60c..59385b3b3a3 100644 --- a/apps/iam/pkg/reconcilers/folder_reconciler.go +++ b/apps/iam/pkg/reconcilers/folder_reconciler.go @@ -8,19 +8,14 @@ import ( "github.com/grafana/grafana-app-sdk/logging" "github.com/grafana/grafana-app-sdk/operator" foldersKind "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1" + "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/services/authz" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" - "k8s.io/client-go/rest" ) -// FolderStore interface for retrieving folder information -type FolderStore interface { - GetFolderParent(ctx context.Context, namespace, uid string) (string, error) -} - // PermissionStore interface for managing folder permissions type PermissionStore interface { GetFolderParents(ctx context.Context, namespace, folderUID string) ([]string, error) @@ -31,13 
+26,11 @@ type PermissionStore interface { // ReconcilerConfig represents the app-specific configuration type ReconcilerConfig struct { ZanzanaCfg authz.ZanzanaClientConfig - KubeConfig *rest.Config Metrics *ReconcilerMetrics } type FolderReconciler struct { permissionStore PermissionStore - folderStore FolderStore metrics *ReconcilerMetrics } @@ -50,12 +43,10 @@ func NewFolderReconciler(cfg ReconcilerConfig) (operator.Reconciler, error) { } // Create dependencies - folderStore := NewAPIFolderStore(cfg.KubeConfig) permissionStore := NewZanzanaPermissionStore(zanzanaClient) folderReconciler := &FolderReconciler{ permissionStore: permissionStore, - folderStore: folderStore, metrics: cfg.Metrics, } @@ -137,11 +128,11 @@ func (r *FolderReconciler) handleUpdateFolder(ctx context.Context, folder *folde folderUID := folder.Name namespace := folder.Namespace - parentUID, err := r.folderStore.GetFolderParent(ctx, namespace, folderUID) + parentUID, err := getFolderParent(ctx, folder) if err != nil { logger.Error("Error getting folder parent", "error", err) if r.metrics != nil { - r.metrics.RecordReconcileFailure(action, "folder_store") + r.metrics.RecordReconcileFailure(action, "failure_informer") } return operator.ReconcileResult{}, err } @@ -217,3 +208,21 @@ func validateFolder(folder *foldersKind.Folder) error { } return nil } + +func getFolderParent(ctx context.Context, folder *foldersKind.Folder) (string, error) { + tracer := otel.GetTracerProvider().Tracer("iam-folder-reconciler") + _, span := tracer.Start(ctx, "get-folder-parent", + trace.WithAttributes( + attribute.String("folder.uid", folder.Name), + ), + ) + defer span.End() + + folderMeta, err := utils.MetaAccessor(folder) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, "failed to get folder meta accessor") + return "", err + } + return folderMeta.GetFolder(), nil +} diff --git a/apps/iam/pkg/reconcilers/folder_service.go b/apps/iam/pkg/reconcilers/folder_service.go deleted file mode 100644 
index 0a1a74a4257..00000000000 --- a/apps/iam/pkg/reconcilers/folder_service.go +++ /dev/null @@ -1,76 +0,0 @@ -package reconcilers - -import ( - "context" - "fmt" - - foldersKind "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1" - "github.com/grafana/grafana/pkg/apimachinery/utils" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" -) - -var _ FolderStore = (*APIFolderStore)(nil) - -func NewAPIFolderStore(config *rest.Config) FolderStore { - return &APIFolderStore{config} -} - -type APIFolderStore struct { - config *rest.Config -} - -func (s *APIFolderStore) GetFolderParent(ctx context.Context, namespace, uid string) (string, error) { - tracer := otel.GetTracerProvider().Tracer("iam-folder-reconciler") - ctx, span := tracer.Start(ctx, "APIFolderStore.GetFolderParent", - trace.WithAttributes( - attribute.String("folder.uid", uid), - attribute.String("folder.namespace", namespace), - ), - ) - defer span.End() - - client, err := s.client(namespace) - if err != nil { - span.RecordError(err) - span.SetStatus(codes.Error, "failed to create kubernetes client") - return "", fmt.Errorf("create resource client: %w", err) - } - - // Get the folder by UID - unstructuredObj, err := client.Get(ctx, uid, metav1.GetOptions{}) - if err != nil { - span.RecordError(err) - span.SetStatus(codes.Error, "failed to get folder from kubernetes API") - return "", fmt.Errorf("get folder %s: %w", uid, err) - } - - object, err := utils.MetaAccessor(unstructuredObj) - if err != nil { - span.RecordError(err) - span.SetStatus(codes.Error, "failed to get meta accessor from folder object") - return "", fmt.Errorf("get meta accessor: %w", err) - } - - parentUID := object.GetFolder() - span.SetAttributes(attribute.String("folder.parent_uid", parentUID)) - span.SetStatus(codes.Ok, "successfully 
retrieved folder parent") - span.AddEvent("folder.parent.retrieved", trace.WithAttributes( - attribute.String("parent.uid", parentUID), - )) - - return parentUID, nil -} - -func (s *APIFolderStore) client(namespace string) (dynamic.ResourceInterface, error) { - client, err := dynamic.NewForConfig(s.config) - if err != nil { - return nil, err - } - return client.Resource(foldersKind.FolderResourceInfo.GroupVersionResource()).Namespace(namespace), nil -} diff --git a/apps/iam/pkg/reconcilers/metrics.go b/apps/iam/pkg/reconcilers/metrics.go index 65a14e32df5..10802744105 100644 --- a/apps/iam/pkg/reconcilers/metrics.go +++ b/apps/iam/pkg/reconcilers/metrics.go @@ -22,7 +22,6 @@ func NewReconcilerMetrics(registerer prometheus.Registerer, namespace string) *R "success_no_changes_needed", "failure_informer", "failure_permission_store", - "failure_folder_store", "failure_unknown", }, } diff --git a/apps/investigations/go.mod b/apps/investigations/go.mod index e67e4fdb1d0..4dd278e7762 100644 --- a/apps/investigations/go.mod +++ b/apps/investigations/go.mod @@ -78,7 +78,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/investigations/go.sum b/apps/investigations/go.sum index dcb8dea6862..6418db2eb84 100644 --- a/apps/investigations/go.sum +++ b/apps/investigations/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= 
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/investigations/pkg/apis/investigations/v0alpha1/investigation_client_gen.go b/apps/investigations/pkg/apis/investigations/v0alpha1/investigation_client_gen.go index 5a615356172..dc73a0301e9 100644 --- a/apps/investigations/pkg/apis/investigations/v0alpha1/investigation_client_gen.go +++ b/apps/investigations/pkg/apis/investigations/v0alpha1/investigation_client_gen.go @@ -76,7 +76,7 @@ func (c *InvestigationClient) Patch(ctx context.Context, identifier resource.Ide return c.client.Patch(ctx, identifier, req, opts) } -func (c *InvestigationClient) UpdateStatus(ctx context.Context, newStatus InvestigationStatus, opts resource.UpdateOptions) (*Investigation, error) { +func (c *InvestigationClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus InvestigationStatus, opts resource.UpdateOptions) (*Investigation, error) { return c.client.Update(ctx, &Investigation{ TypeMeta: metav1.TypeMeta{ Kind: InvestigationKind().Kind(), @@ -84,6 +84,8 @@ func (c *InvestigationClient) UpdateStatus(ctx context.Context, newStatus Invest }, ObjectMeta: metav1.ObjectMeta{ ResourceVersion: opts.ResourceVersion, + Namespace: identifier.Namespace, + Name: identifier.Name, }, Status: newStatus, }, 
resource.UpdateOptions{ diff --git a/apps/investigations/pkg/apis/investigations/v0alpha1/investigationindex_client_gen.go b/apps/investigations/pkg/apis/investigations/v0alpha1/investigationindex_client_gen.go index 3b63abc5d09..573d743b3cf 100644 --- a/apps/investigations/pkg/apis/investigations/v0alpha1/investigationindex_client_gen.go +++ b/apps/investigations/pkg/apis/investigations/v0alpha1/investigationindex_client_gen.go @@ -76,7 +76,7 @@ func (c *InvestigationIndexClient) Patch(ctx context.Context, identifier resourc return c.client.Patch(ctx, identifier, req, opts) } -func (c *InvestigationIndexClient) UpdateStatus(ctx context.Context, newStatus InvestigationIndexStatus, opts resource.UpdateOptions) (*InvestigationIndex, error) { +func (c *InvestigationIndexClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus InvestigationIndexStatus, opts resource.UpdateOptions) (*InvestigationIndex, error) { return c.client.Update(ctx, &InvestigationIndex{ TypeMeta: metav1.TypeMeta{ Kind: InvestigationIndexKind().Kind(), @@ -84,6 +84,8 @@ func (c *InvestigationIndexClient) UpdateStatus(ctx context.Context, newStatus I }, ObjectMeta: metav1.ObjectMeta{ ResourceVersion: opts.ResourceVersion, + Namespace: identifier.Namespace, + Name: identifier.Name, }, Status: newStatus, }, resource.UpdateOptions{ diff --git a/apps/investigations/pkg/apis/investigations_manifest.go b/apps/investigations/pkg/apis/investigations_manifest.go index e4d0dc6fbd9..04c52dbd9ac 100644 --- a/apps/investigations/pkg/apis/investigations_manifest.go +++ b/apps/investigations/pkg/apis/investigations_manifest.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/grafana-app-sdk/app" "github.com/grafana/grafana-app-sdk/resource" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kube-openapi/pkg/spec3" v0alpha1 "github.com/grafana/grafana/apps/investigations/pkg/apis/investigations/v0alpha1" ) @@ -27,8 +28,9 @@ var ( ) var appManifestData = app.ManifestData{ - AppName: 
"investigations", - Group: "investigations.grafana.app", + AppName: "investigations", + Group: "investigations.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", @@ -50,6 +52,10 @@ var appManifestData = app.ManifestData{ Schema: &versionSchemaInvestigationIndexv0alpha1, }, }, + Routes: app.ManifestVersionRoutes{ + Namespaced: map[string]spec3.PathProps{}, + Cluster: map[string]spec3.PathProps{}, + }, }, }, } @@ -79,6 +85,7 @@ var customRouteToGoResponseType = map[string]any{} // ManifestCustomRouteResponsesAssociator returns the associated response go type for a given kind, version, custom route path, and method, if one exists. // kind may be empty for custom routes which are not kind subroutes. Leading slashes are removed from subroute paths. // If there is no association for the provided kind, version, custom route path, and method, exists will return false. +// Resource routes (those without a kind) should prefix their route with "/" if the route is namespaced (otherwise the route is assumed to be cluster-scope) func ManifestCustomRouteResponsesAssociator(kind, version, path, verb string) (goType any, exists bool) { if len(path) > 0 && path[0] == '/' { path = path[1:] @@ -97,8 +104,22 @@ func ManifestCustomRouteQueryAssociator(kind, version, path, verb string) (goTyp return goType, exists } +var customRouteToGoRequestBodyType = map[string]any{} + +func ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb string) (goType any, exists bool) { + if len(path) > 0 && path[0] == '/' { + path = path[1:] + } + goType, exists = customRouteToGoRequestBodyType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))] + return goType, exists +} + type GoTypeAssociator struct{} +func NewGoTypeAssociator() *GoTypeAssociator { + return &GoTypeAssociator{} +} + func (g *GoTypeAssociator) KindToGoType(kind, version string) (goType resource.Kind, exists bool) { return ManifestGoTypeAssociator(kind, 
version) } @@ -108,3 +129,6 @@ func (g *GoTypeAssociator) CustomRouteReturnGoType(kind, version, path, verb str func (g *GoTypeAssociator) CustomRouteQueryGoType(kind, version, path, verb string) (goType runtime.Object, exists bool) { return ManifestCustomRouteQueryAssociator(kind, version, path, verb) } +func (g *GoTypeAssociator) CustomRouteRequestBodyGoType(kind, version, path, verb string) (goType any, exists bool) { + return ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb) +} diff --git a/apps/investigations/pkg/app/investigations_app.go b/apps/investigations/pkg/app/investigations_app.go index b3589cea6e4..516d278788d 100644 --- a/apps/investigations/pkg/app/investigations_app.go +++ b/apps/investigations/pkg/app/investigations_app.go @@ -20,7 +20,7 @@ func New(cfg app.Config) (app.App, error) { KubeConfig: cfg.KubeConfig, InformerConfig: simple.AppInformerConfig{ InformerOptions: operator.InformerOptions{ - ErrorHandler: func(ctx context.Context, err error) { + ErrorHandler: func(_ context.Context, err error) { klog.ErrorS(err, "Informer processing error") }, }, diff --git a/apps/playlist/go.mod b/apps/playlist/go.mod index 3ec3f52b03a..6df653c9204 100644 --- a/apps/playlist/go.mod +++ b/apps/playlist/go.mod @@ -79,7 +79,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/playlist/go.sum b/apps/playlist/go.sum index dcb8dea6862..6418db2eb84 100644 --- a/apps/playlist/go.sum +++ b/apps/playlist/go.sum @@ -203,8 +203,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/playlist/pkg/apis/playlist_manifest.go b/apps/playlist/pkg/apis/playlist_manifest.go index f27cf6dd635..1baede5a7a4 100644 --- a/apps/playlist/pkg/apis/playlist_manifest.go +++ b/apps/playlist/pkg/apis/playlist_manifest.go @@ -25,8 +25,9 @@ var ( ) var appManifestData = app.ManifestData{ - AppName: "playlist", - Group: "playlist.grafana.app", + AppName: "playlist", + Group: "playlist.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/plugins/go.mod b/apps/plugins/go.mod index a6dcaddde55..653a61b9087 100644 --- a/apps/plugins/go.mod +++ b/apps/plugins/go.mod @@ -3,7 +3,7 @@ module github.com/grafana/grafana/apps/plugins go 1.24.4 require ( - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 github.com/grafana/grafana-app-sdk v0.46.0 github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250428110029-a8ea72012bde k8s.io/apimachinery v0.34.1 @@ -35,7 +35,7 @@ require ( github.com/google/go-cmp 
v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/grafana-app-sdk/logging v0.45.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect @@ -86,7 +86,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/plugins/go.sum b/apps/plugins/go.sum index 87d56ef8f14..056d7187916 100644 --- a/apps/plugins/go.sum +++ b/apps/plugins/go.sum @@ -50,10 +50,10 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f 
h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.46.0 h1:gvzQvCQgZJ/73BfAcbDt/6TAMhnVikVPxZt/UwDl+oc= @@ -219,8 +219,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/plugins/pkg/apis/plugins_manifest.go b/apps/plugins/pkg/apis/plugins_manifest.go index 2ebfb699452..3965a906139 100644 --- a/apps/plugins/pkg/apis/plugins_manifest.go +++ 
b/apps/plugins/pkg/apis/plugins_manifest.go @@ -28,8 +28,9 @@ var ( ) var appManifestData = app.ManifestData{ - AppName: "plugins", - Group: "plugins.grafana.app", + AppName: "plugins", + Group: "plugins.grafana.app", + PreferredVersion: "v0alpha1", Versions: []app.ManifestVersion{ { Name: "v0alpha1", diff --git a/apps/preferences/Makefile b/apps/preferences/Makefile index 230bfd4149a..bc8d6d30cb5 100644 --- a/apps/preferences/Makefile +++ b/apps/preferences/Makefile @@ -6,4 +6,5 @@ generate: install-app-sdk update-app-sdk --source=./kinds/ \ --gogenpath=./pkg/apis \ --grouping=group \ + --genoperatorstate=false \ --defencoding=none \ No newline at end of file diff --git a/apps/preferences/go.mod b/apps/preferences/go.mod index 85e2330c000..bc92c33c5a3 100644 --- a/apps/preferences/go.mod +++ b/apps/preferences/go.mod @@ -55,7 +55,7 @@ require ( golang.org/x/term v0.35.0 // indirect golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.13.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/client-go v0.34.1 // indirect diff --git a/apps/preferences/go.sum b/apps/preferences/go.sum index f7d33fcb39c..4721393801d 100644 --- a/apps/preferences/go.sum +++ b/apps/preferences/go.sum @@ -152,8 +152,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 
h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go index 380bafb16bf..66e661e574d 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type PreferencesClient struct { @@ -76,24 +75,6 @@ func (c *PreferencesClient) Patch(ctx context.Context, identifier resource.Ident return c.client.Patch(ctx, identifier, req, opts) } -func (c *PreferencesClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus PreferencesStatus, opts resource.UpdateOptions) (*Preferences, error) { - return c.client.Update(ctx, &Preferences{ - TypeMeta: metav1.TypeMeta{ - Kind: PreferencesKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *PreferencesClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go 
b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go index 9f1857a5f7b..81a4b4351ab 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_object_gen.go @@ -21,8 +21,6 @@ type Preferences struct { // Spec is the spec of the Preferences Spec PreferencesSpec `json:"spec" yaml:"spec"` - - Status PreferencesStatus `json:"status" yaml:"status"` } func (o *Preferences) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Preferences) SetSpec(spec any) error { } func (o *Preferences) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Preferences) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Preferences) GetSubresource(name string) (any, bool) { func (o *Preferences) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(PreferencesStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type PreferencesStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Preferences) DeepCopyInto(dst *Preferences) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *PreferencesSpec) DeepCopy() *PreferencesSpec { func (s *PreferencesSpec) DeepCopyInto(dst *PreferencesSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of PreferencesStatus -func (s *PreferencesStatus) DeepCopy() *PreferencesStatus { - cpy := &PreferencesStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies PreferencesStatus into another PreferencesStatus object 
-func (s *PreferencesStatus) DeepCopyInto(dst *PreferencesStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_status_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_status_gen.go deleted file mode 100644 index 420dd054d3d..00000000000 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/preferences_status_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -package v1alpha1 - -// +k8s:openapi-gen=true -type PreferencesstatusOperatorState struct { - // lastEvaluation is the ResourceVersion last evaluated - LastEvaluation string `json:"lastEvaluation"` - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - State PreferencesStatusOperatorStateState `json:"state"` - // descriptiveState is an optional more descriptive state field which has no requirements on format - DescriptiveState *string `json:"descriptiveState,omitempty"` - // details contains any extra information that is operator-specific - Details map[string]interface{} `json:"details,omitempty"` -} - -// NewPreferencesstatusOperatorState creates a new PreferencesstatusOperatorState object. -func NewPreferencesstatusOperatorState() *PreferencesstatusOperatorState { - return &PreferencesstatusOperatorState{} -} - -// +k8s:openapi-gen=true -type PreferencesStatus struct { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. - OperatorStates map[string]PreferencesstatusOperatorState `json:"operatorStates,omitempty"` - // additionalFields is reserved for future use - AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"` -} - -// NewPreferencesStatus creates a new PreferencesStatus object. 
-func NewPreferencesStatus() *PreferencesStatus { - return &PreferencesStatus{} -} - -// +k8s:openapi-gen=true -type PreferencesStatusOperatorStateState string - -const ( - PreferencesStatusOperatorStateStateSuccess PreferencesStatusOperatorStateState = "success" - PreferencesStatusOperatorStateStateInProgress PreferencesStatusOperatorStateState = "in_progress" - PreferencesStatusOperatorStateStateFailed PreferencesStatusOperatorStateState = "failed" -) diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go index 3a607012db3..9f2c56ea59b 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_client_gen.go @@ -4,7 +4,6 @@ import ( "context" "github.com/grafana/grafana-app-sdk/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type StarsClient struct { @@ -76,24 +75,6 @@ func (c *StarsClient) Patch(ctx context.Context, identifier resource.Identifier, return c.client.Patch(ctx, identifier, req, opts) } -func (c *StarsClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus StarsStatus, opts resource.UpdateOptions) (*Stars, error) { - return c.client.Update(ctx, &Stars{ - TypeMeta: metav1.TypeMeta{ - Kind: StarsKind().Kind(), - APIVersion: GroupVersion.Identifier(), - }, - ObjectMeta: metav1.ObjectMeta{ - ResourceVersion: opts.ResourceVersion, - Namespace: identifier.Namespace, - Name: identifier.Name, - }, - Status: newStatus, - }, resource.UpdateOptions{ - Subresource: "status", - ResourceVersion: opts.ResourceVersion, - }) -} - func (c *StarsClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { return c.client.Delete(ctx, identifier, opts) } diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go index 
d6836cea565..8381844dceb 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_object_gen.go @@ -21,8 +21,6 @@ type Stars struct { // Spec is the spec of the Stars Spec StarsSpec `json:"spec" yaml:"spec"` - - Status StarsStatus `json:"status" yaml:"status"` } func (o *Stars) GetSpec() any { @@ -39,15 +37,11 @@ func (o *Stars) SetSpec(spec any) error { } func (o *Stars) GetSubresources() map[string]any { - return map[string]any{ - "status": o.Status, - } + return map[string]any{} } func (o *Stars) GetSubresource(name string) (any, bool) { switch name { - case "status": - return o.Status, true default: return nil, false } @@ -55,13 +49,6 @@ func (o *Stars) GetSubresource(name string) (any, bool) { func (o *Stars) SetSubresource(name string, value any) error { switch name { - case "status": - cast, ok := value.(StarsStatus) - if !ok { - return fmt.Errorf("cannot set status type %#v, not of type StarsStatus", value) - } - o.Status = cast - return nil default: return fmt.Errorf("subresource '%s' does not exist", name) } @@ -233,7 +220,6 @@ func (o *Stars) DeepCopyInto(dst *Stars) { dst.TypeMeta.Kind = o.TypeMeta.Kind o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta) o.Spec.DeepCopyInto(&dst.Spec) - o.Status.DeepCopyInto(&dst.Status) } // Interface compliance compile-time check @@ -305,15 +291,3 @@ func (s *StarsSpec) DeepCopy() *StarsSpec { func (s *StarsSpec) DeepCopyInto(dst *StarsSpec) { resource.CopyObjectInto(dst, s) } - -// DeepCopy creates a full deep copy of StarsStatus -func (s *StarsStatus) DeepCopy() *StarsStatus { - cpy := &StarsStatus{} - s.DeepCopyInto(cpy) - return cpy -} - -// DeepCopyInto deep copies StarsStatus into another StarsStatus object -func (s *StarsStatus) DeepCopyInto(dst *StarsStatus) { - resource.CopyObjectInto(dst, s) -} diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_status_gen.go 
b/apps/preferences/pkg/apis/preferences/v1alpha1/stars_status_gen.go deleted file mode 100644 index c8c7c2b07f2..00000000000 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/stars_status_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated - EDITING IS FUTILE. DO NOT EDIT. - -package v1alpha1 - -// +k8s:openapi-gen=true -type StarsstatusOperatorState struct { - // lastEvaluation is the ResourceVersion last evaluated - LastEvaluation string `json:"lastEvaluation"` - // state describes the state of the lastEvaluation. - // It is limited to three possible states for machine evaluation. - State StarsStatusOperatorStateState `json:"state"` - // descriptiveState is an optional more descriptive state field which has no requirements on format - DescriptiveState *string `json:"descriptiveState,omitempty"` - // details contains any extra information that is operator-specific - Details map[string]interface{} `json:"details,omitempty"` -} - -// NewStarsstatusOperatorState creates a new StarsstatusOperatorState object. -func NewStarsstatusOperatorState() *StarsstatusOperatorState { - return &StarsstatusOperatorState{} -} - -// +k8s:openapi-gen=true -type StarsStatus struct { - // operatorStates is a map of operator ID to operator state evaluations. - // Any operator which consumes this kind SHOULD add its state evaluation information to this field. - OperatorStates map[string]StarsstatusOperatorState `json:"operatorStates,omitempty"` - // additionalFields is reserved for future use - AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"` -} - -// NewStarsStatus creates a new StarsStatus object. 
-func NewStarsStatus() *StarsStatus { - return &StarsStatus{} -} - -// +k8s:openapi-gen=true -type StarsStatusOperatorStateState string - -const ( - StarsStatusOperatorStateStateSuccess StarsStatusOperatorStateState = "success" - StarsStatusOperatorStateStateInProgress StarsStatusOperatorStateState = "in_progress" - StarsStatusOperatorStateStateFailed StarsStatusOperatorStateState = "failed" -) diff --git a/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go b/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go index 7b9014a57bb..c8700a3dfa5 100644 --- a/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go +++ b/apps/preferences/pkg/apis/preferences/v1alpha1/zz_generated.openapi.go @@ -20,14 +20,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesNavbarPreference": schema_pkg_apis_preferences_v1alpha1_PreferencesNavbarPreference(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesQueryHistoryPreference": schema_pkg_apis_preferences_v1alpha1_PreferencesQueryHistoryPreference(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec": schema_pkg_apis_preferences_v1alpha1_PreferencesSpec(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesStatus": schema_pkg_apis_preferences_v1alpha1_PreferencesStatus(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesstatusOperatorState": schema_pkg_apis_preferences_v1alpha1_PreferencesstatusOperatorState(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.Stars": schema_pkg_apis_preferences_v1alpha1_Stars(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsList": schema_pkg_apis_preferences_v1alpha1_StarsList(ref), 
"github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsResource": schema_pkg_apis_preferences_v1alpha1_StarsResource(ref), "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec": schema_pkg_apis_preferences_v1alpha1_StarsSpec(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsStatus": schema_pkg_apis_preferences_v1alpha1_StarsStatus(ref), - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsstatusOperatorState": schema_pkg_apis_preferences_v1alpha1_StarsstatusOperatorState(ref), } } @@ -64,18 +60,12 @@ func schema_pkg_apis_preferences_v1alpha1_Preferences(ref common.ReferenceCallba Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec", "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -277,101 +267,6 @@ func schema_pkg_apis_preferences_v1alpha1_PreferencesSpec(ref common.ReferenceCa } } -func schema_pkg_apis_preferences_v1alpha1_PreferencesStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "operatorStates": { - SchemaProps: spec.SchemaProps{ - Description: 
"operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesstatusOperatorState"), - }, - }, - }, - }, - }, - "additionalFields": { - SchemaProps: spec.SchemaProps{ - Description: "additionalFields is reserved for future use", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.PreferencesstatusOperatorState"}, - } -} - -func schema_pkg_apis_preferences_v1alpha1_PreferencesstatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "lastEvaluation": { - SchemaProps: spec.SchemaProps{ - Description: "lastEvaluation is the ResourceVersion last evaluated", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "state": { - SchemaProps: spec.SchemaProps{ - Description: "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "descriptiveState": { - SchemaProps: spec.SchemaProps{ - Description: "descriptiveState is an optional more descriptive state field which has no requirements on format", - Type: []string{"string"}, - Format: "", - }, - }, - "details": { - SchemaProps: spec.SchemaProps{ - Description: "details contains any extra information that is operator-specific", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"lastEvaluation", "state"}, - }, - }, - } -} - func schema_pkg_apis_preferences_v1alpha1_Stars(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -405,18 +300,12 @@ func schema_pkg_apis_preferences_v1alpha1_Stars(ref common.ReferenceCallback) co Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec"), }, }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsStatus"), - }, - }, }, - Required: []string{"metadata", "spec", "status"}, + Required: []string{"metadata", "spec"}, }, }, Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec", "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } @@ -542,98 +431,3 @@ func schema_pkg_apis_preferences_v1alpha1_StarsSpec(ref common.ReferenceCallback 
"github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsResource"}, } } - -func schema_pkg_apis_preferences_v1alpha1_StarsStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "operatorStates": { - SchemaProps: spec.SchemaProps{ - Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsstatusOperatorState"), - }, - }, - }, - }, - }, - "additionalFields": { - SchemaProps: spec.SchemaProps{ - Description: "additionalFields is reserved for future use", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1.StarsstatusOperatorState"}, - } -} - -func schema_pkg_apis_preferences_v1alpha1_StarsstatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "lastEvaluation": { - SchemaProps: spec.SchemaProps{ - Description: "lastEvaluation is the ResourceVersion last evaluated", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "state": { - SchemaProps: spec.SchemaProps{ - Description: "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "descriptiveState": { - SchemaProps: spec.SchemaProps{ - Description: "descriptiveState is an optional more descriptive state field which has no requirements on format", - Type: []string{"string"}, - Format: "", - }, - }, - "details": { - SchemaProps: spec.SchemaProps{ - Description: "details contains any extra information that is operator-specific", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"lastEvaluation", "state"}, - }, - }, - } -} diff --git a/apps/preferences/pkg/apis/preferences_manifest.go b/apps/preferences/pkg/apis/preferences_manifest.go index b0a3468058a..3912bc12866 100644 --- a/apps/preferences/pkg/apis/preferences_manifest.go +++ b/apps/preferences/pkg/apis/preferences_manifest.go @@ -19,17 +19,18 @@ import ( ) var ( - rawSchemaPreferencesv1alpha1 = []byte(`{"CookiePreferences":{"additionalProperties":false,"properties":{"analytics":{"additionalProperties":{},"type":"object"},"functional":{"additionalProperties":{},"type":"object"},"performance":{"additionalProperties":{},"type":"object"}},"type":"object"},"NavbarPreference":{"additionalProperties":false,"properties":{"bookmarkUrls":{"items":{"type":"string"},"type":"array"}},"required":["bookmarkUrls"],"type":"object"},"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which has no requirements on format","type":"string"},"details":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last 
evaluated","type":"string"},"state":{"description":"state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"Preferences":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"QueryHistoryPreference":{"additionalProperties":false,"properties":{"homeTab":{"description":"one of: '' | 'query' | 'starred';","type":"string"}},"type":"object"},"spec":{"additionalProperties":false,"properties":{"cookiePreferences":{"$ref":"#/components/schemas/CookiePreferences","description":"Cookie preferences"},"homeDashboardUID":{"description":"UID for the home dashboard","type":"string"},"language":{"description":"Selected language (beta)","type":"string"},"navbar":{"$ref":"#/components/schemas/NavbarPreference","description":"Navigation preferences"},"queryHistory":{"$ref":"#/components/schemas/QueryHistoryPreference","description":"Explore query history preferences"},"regionalFormat":{"description":"Selected locale (beta)","type":"string"},"theme":{"description":"light, dark, empty is default","type":"string"},"timezone":{"description":"The timezone selection\nTODO: this should use the timezone defined in common","type":"string"},"weekStart":{"description":"day of the week (sunday, monday, etc)","type":"string"}},"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"additionalFields is reserved for future use","type":"object"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this 
field.","type":"object"}},"type":"object"}}`) + rawSchemaPreferencesv1alpha1 = []byte(`{"CookiePreferences":{"additionalProperties":false,"properties":{"analytics":{"additionalProperties":{},"type":"object"},"functional":{"additionalProperties":{},"type":"object"},"performance":{"additionalProperties":{},"type":"object"}},"type":"object"},"NavbarPreference":{"additionalProperties":false,"properties":{"bookmarkUrls":{"items":{"type":"string"},"type":"array"}},"required":["bookmarkUrls"],"type":"object"},"Preferences":{"properties":{"spec":{"$ref":"#/components/schemas/spec"}},"required":["spec"]},"QueryHistoryPreference":{"additionalProperties":false,"properties":{"homeTab":{"description":"one of: '' | 'query' | 'starred';","type":"string"}},"type":"object"},"spec":{"additionalProperties":false,"properties":{"cookiePreferences":{"$ref":"#/components/schemas/CookiePreferences","description":"Cookie preferences"},"homeDashboardUID":{"description":"UID for the home dashboard","type":"string"},"language":{"description":"Selected language (beta)","type":"string"},"navbar":{"$ref":"#/components/schemas/NavbarPreference","description":"Navigation preferences"},"queryHistory":{"$ref":"#/components/schemas/QueryHistoryPreference","description":"Explore query history preferences"},"regionalFormat":{"description":"Selected locale (beta)","type":"string"},"theme":{"description":"light, dark, empty is default","type":"string"},"timezone":{"description":"The timezone selection\nTODO: this should use the timezone defined in common","type":"string"},"weekStart":{"description":"day of the week (sunday, monday, etc)","type":"string"}},"type":"object"}}`) versionSchemaPreferencesv1alpha1 app.VersionSchema _ = json.Unmarshal(rawSchemaPreferencesv1alpha1, &versionSchemaPreferencesv1alpha1) - rawSchemaStarsv1alpha1 = []byte(`{"OperatorState":{"additionalProperties":false,"properties":{"descriptiveState":{"description":"descriptiveState is an optional more descriptive state field which 
has no requirements on format","type":"string"},"details":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"details contains any extra information that is operator-specific","type":"object"},"lastEvaluation":{"description":"lastEvaluation is the ResourceVersion last evaluated","type":"string"},"state":{"description":"state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine evaluation.","enum":["success","in_progress","failed"],"type":"string"}},"required":["lastEvaluation","state"],"type":"object"},"Resource":{"additionalProperties":false,"properties":{"group":{"type":"string"},"kind":{"type":"string"},"names":{"description":"The set of resources\n+listType=set","items":{"type":"string"},"type":"array"}},"required":["group","kind","names"],"type":"object"},"Stars":{"properties":{"spec":{"$ref":"#/components/schemas/spec"},"status":{"$ref":"#/components/schemas/status"}},"required":["spec"]},"spec":{"additionalProperties":false,"properties":{"resource":{"items":{"$ref":"#/components/schemas/Resource"},"type":"array"}},"required":["resource"],"type":"object"},"status":{"additionalProperties":false,"properties":{"additionalFields":{"additionalProperties":{"additionalProperties":{},"type":"object"},"description":"additionalFields is reserved for future use","type":"object"},"operatorStates":{"additionalProperties":{"$ref":"#/components/schemas/OperatorState"},"description":"operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.","type":"object"}},"type":"object"}}`) + rawSchemaStarsv1alpha1 = []byte(`{"Resource":{"additionalProperties":false,"properties":{"group":{"type":"string"},"kind":{"type":"string"},"names":{"description":"The set of 
resources\n+listType=set","items":{"type":"string"},"type":"array"}},"required":["group","kind","names"],"type":"object"},"Stars":{"properties":{"spec":{"$ref":"#/components/schemas/spec"}},"required":["spec"]},"spec":{"additionalProperties":false,"properties":{"resource":{"items":{"$ref":"#/components/schemas/Resource"},"type":"array"}},"required":["resource"],"type":"object"}}`) versionSchemaStarsv1alpha1 app.VersionSchema _ = json.Unmarshal(rawSchemaStarsv1alpha1, &versionSchemaStarsv1alpha1) ) var appManifestData = app.ManifestData{ - AppName: "preferences", - Group: "preferences.grafana.app", + AppName: "preferences", + Group: "preferences.grafana.app", + PreferredVersion: "v1alpha1", Versions: []app.ManifestVersion{ { Name: "v1alpha1", diff --git a/apps/provisioning/Makefile b/apps/provisioning/Makefile index c3d7e748603..e9a8b1fbdf9 100644 --- a/apps/provisioning/Makefile +++ b/apps/provisioning/Makefile @@ -1,8 +1,13 @@ include ../sdk.mk -.PHONY: generate +.PHONY: generate # Run Grafana App SDK code generation generate: install-app-sdk update-app-sdk - @$(APP_SDK_BIN) generate -g ./kinds --grouping=group --postprocess --defencoding=none --useoldmanifestkinds + @$(APP_SDK_BIN) generate \ + --source=./kinds/ \ + --gogenpath=./pkg/apis \ + --grouping=group \ + --genoperatorstate=false \ + --defencoding=none .PHONY: build build: generate diff --git a/apps/provisioning/go.mod b/apps/provisioning/go.mod index 0d32c925320..50127e16599 100644 --- a/apps/provisioning/go.mod +++ b/apps/provisioning/go.mod @@ -5,7 +5,7 @@ go 1.24.6 require ( github.com/google/go-github/v70 v70.0.0 github.com/google/uuid v1.6.0 - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana/apps/secret v0.0.0-20250902093454-b56b7add012f github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250804150913-990f1c69ecc2 @@ -40,7 +40,7 @@ require ( 
github.com/google/go-github/v64 v64.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // indirect + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grafana/grafana-app-sdk v0.46.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -75,7 +75,7 @@ require ( golang.org/x/time v0.13.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/apps/provisioning/go.sum b/apps/provisioning/go.sum index 0e22e226538..5732e218b44 100644 --- a/apps/provisioning/go.sum +++ b/apps/provisioning/go.sum @@ -52,10 +52,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= 
+github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.46.0 h1:gvzQvCQgZJ/73BfAcbDt/6TAMhnVikVPxZt/UwDl+oc= @@ -224,8 +224,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/provisioning/pkg/apis/provisioning/v0alpha1/jobs.go b/apps/provisioning/pkg/apis/provisioning/v0alpha1/jobs.go index 847da88fbc5..b96fb1a6d27 100644 --- a/apps/provisioning/pkg/apis/provisioning/v0alpha1/jobs.go +++ 
b/apps/provisioning/pkg/apis/provisioning/v0alpha1/jobs.go @@ -221,9 +221,9 @@ func (in JobStatus) ToSyncStatus(jobId string) SyncStatus { } type JobResourceSummary struct { - Group string `json:"group,omitempty"` - Resource string `json:"resource,omitempty"` - Total int64 `json:"total,omitempty"` // the count (if known) + Group string `json:"group,omitempty"` + Kind string `json:"kind,omitempty"` + Total int64 `json:"total,omitempty"` // the count (if known) Create int64 `json:"create,omitempty"` Update int64 `json:"update,omitempty"` diff --git a/apps/provisioning/pkg/apis/provisioning/v0alpha1/zz_generated.openapi.go b/apps/provisioning/pkg/apis/provisioning/v0alpha1/zz_generated.openapi.go index 74f7b7d165f..18c385ce59c 100644 --- a/apps/provisioning/pkg/apis/provisioning/v0alpha1/zz_generated.openapi.go +++ b/apps/provisioning/pkg/apis/provisioning/v0alpha1/zz_generated.openapi.go @@ -846,7 +846,7 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen Format: "", }, }, - "resource": { + "kind": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, Format: "", diff --git a/apps/provisioning/pkg/generated/applyconfiguration/provisioning/v0alpha1/jobresourcesummary.go b/apps/provisioning/pkg/generated/applyconfiguration/provisioning/v0alpha1/jobresourcesummary.go index 75b69663794..ed6a62f651a 100644 --- a/apps/provisioning/pkg/generated/applyconfiguration/provisioning/v0alpha1/jobresourcesummary.go +++ b/apps/provisioning/pkg/generated/applyconfiguration/provisioning/v0alpha1/jobresourcesummary.go @@ -7,16 +7,16 @@ package v0alpha1 // JobResourceSummaryApplyConfiguration represents a declarative configuration of the JobResourceSummary type for use // with apply. 
type JobResourceSummaryApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Resource *string `json:"resource,omitempty"` - Total *int64 `json:"total,omitempty"` - Create *int64 `json:"create,omitempty"` - Update *int64 `json:"update,omitempty"` - Delete *int64 `json:"delete,omitempty"` - Write *int64 `json:"write,omitempty"` - Error *int64 `json:"error,omitempty"` - Noop *int64 `json:"noop,omitempty"` - Errors []string `json:"errors,omitempty"` + Group *string `json:"group,omitempty"` + Kind *string `json:"kind,omitempty"` + Total *int64 `json:"total,omitempty"` + Create *int64 `json:"create,omitempty"` + Update *int64 `json:"update,omitempty"` + Delete *int64 `json:"delete,omitempty"` + Write *int64 `json:"write,omitempty"` + Error *int64 `json:"error,omitempty"` + Noop *int64 `json:"noop,omitempty"` + Errors []string `json:"errors,omitempty"` } // JobResourceSummaryApplyConfiguration constructs a declarative configuration of the JobResourceSummary type for use with @@ -33,11 +33,11 @@ func (b *JobResourceSummaryApplyConfiguration) WithGroup(value string) *JobResou return b } -// WithResource sets the Resource field in the declarative configuration to the given value +// WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Resource field is set to the value of the last call. -func (b *JobResourceSummaryApplyConfiguration) WithResource(value string) *JobResourceSummaryApplyConfiguration { - b.Resource = &value +// If called multiple times, the Kind field is set to the value of the last call. 
+func (b *JobResourceSummaryApplyConfiguration) WithKind(value string) *JobResourceSummaryApplyConfiguration { + b.Kind = &value return b } diff --git a/apps/provisioning/pkg/repository/github/client.go b/apps/provisioning/pkg/repository/github/client.go index 5b21a5a4f1e..00f1ca0946b 100644 --- a/apps/provisioning/pkg/repository/github/client.go +++ b/apps/provisioning/pkg/repository/github/client.go @@ -13,6 +13,7 @@ import ( // API errors that we need to convey after parsing real GH errors (or faking them). var ( ErrResourceNotFound = errors.New("the resource does not exist") + ErrUnauthorized = errors.New("unauthorized") //lint:ignore ST1005 this is not punctuation ErrServiceUnavailable = apierrors.NewServiceUnavailable("github is unavailable") ErrTooManyItems = errors.New("maximum number of items exceeded") diff --git a/apps/provisioning/pkg/repository/github/impl.go b/apps/provisioning/pkg/repository/github/impl.go index c9f5468bc8d..c80f1e7a7f9 100644 --- a/apps/provisioning/pkg/repository/github/impl.go +++ b/apps/provisioning/pkg/repository/github/impl.go @@ -199,6 +199,9 @@ func (r *githubClient) DeleteWebhook(ctx context.Context, owner, repository stri if ghErr.Response.StatusCode == http.StatusNotFound { return ErrResourceNotFound } + if ghErr.Response.StatusCode == http.StatusUnauthorized || ghErr.Response.StatusCode == http.StatusForbidden { + return ErrUnauthorized + } return err } diff --git a/apps/provisioning/pkg/repository/github/impl_test.go b/apps/provisioning/pkg/repository/github/impl_test.go index fedbc2a9850..57f954e906e 100644 --- a/apps/provisioning/pkg/repository/github/impl_test.go +++ b/apps/provisioning/pkg/repository/github/impl_test.go @@ -975,6 +975,27 @@ func TestGithubClient_DeleteWebhook(t *testing.T) { webhookID: 789, wantErr: ErrServiceUnavailable, }, + { + name: "unauthorized to delete the webhook", + mockHandler: mockhub.NewMockedHTTPClient( + mockhub.WithRequestMatchHandler( + mockhub.DeleteReposHooksByOwnerByRepoByHookId, 
+ http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + require.NoError(t, json.NewEncoder(w).Encode(github.ErrorResponse{ + Response: &http.Response{ + StatusCode: http.StatusUnauthorized, + }, + Message: "401 bad credentials", + })) + }), + ), + ), + owner: "test-owner", + repository: "test-repo", + webhookID: 789, + wantErr: ErrUnauthorized, + }, { name: "other error", mockHandler: mockhub.NewMockedHTTPClient( diff --git a/apps/provisioning/pkg/repository/github/testdata/webhook-push-keep_file_only.json b/apps/provisioning/pkg/repository/github/testdata/webhook-push-keep_file_only.json new file mode 100644 index 00000000000..f4ab95c170e --- /dev/null +++ b/apps/provisioning/pkg/repository/github/testdata/webhook-push-keep_file_only.json @@ -0,0 +1,105 @@ +{ + "ref": "refs/heads/main", + "before": "72096e3adc646c5a5b8a91744f962b12bac06045", + "after": "1234567890abcdef1234567890abcdef12345678", + "repository": { + "id": 888020043, + "node_id": "R_kgDONO4cSw", + "name": "git-ui-sync-demo", + "full_name": "grafana/git-ui-sync-demo", + "private": true, + "owner": { + "name": "grafana", + "email": "hello@grafana.com", + "login": "grafana", + "id": 7195757, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjcxOTU3NTc=", + "avatar_url": "https://avatars.githubusercontent.com/u/7195757?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/grafana", + "html_url": "https://github.com/grafana", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/grafana/git-ui-sync-demo", + "description": "A repository containing Grafana dashboards to demo the Github Sync feature in Grafana.", + "fork": false, + "url": "https://github.com/grafana/git-ui-sync-demo", + "default_branch": "main", + "master_branch": "main", + "organization": "grafana" + }, + "pusher": { + "name": "testuser", + "email": "test@grafana.com" + }, + "organization": { + "login": "grafana", + "id": 7195757, + "node_id": 
"MDEyOk9yZ2FuaXphdGlvbjcxOTU3NTc=", + "url": "https://api.github.com/orgs/grafana", + "avatar_url": "https://avatars.githubusercontent.com/u/7195757?v=4" + }, + "sender": { + "login": "testuser", + "id": 123456, + "node_id": "MDQ6VXNlcjEyMzQ1Ng==", + "avatar_url": "https://avatars.githubusercontent.com/u/123456?v=4", + "type": "User", + "site_admin": false + }, + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/grafana/git-ui-sync-demo/compare/72096e3adc64...1234567890ab", + "commits": [ + { + "id": "1234567890abcdef1234567890abcdef12345678", + "tree_id": "abcdef1234567890abcdef1234567890abcdef12", + "distinct": true, + "message": "Remove empty folder by deleting .keep file", + "timestamp": "2024-12-09T11:00:48+03:00", + "url": "https://github.com/grafana/git-ui-sync-demo/commit/1234567890abcdef1234567890abcdef12345678", + "author": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "committer": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "added": [], + "removed": [ + "empty-folder/.keep" + ], + "modified": [] + } + ], + "head_commit": { + "id": "1234567890abcdef1234567890abcdef12345678", + "tree_id": "abcdef1234567890abcdef1234567890abcdef12", + "distinct": true, + "message": "Remove empty folder by deleting .keep file", + "timestamp": "2024-12-09T11:00:48+03:00", + "url": "https://github.com/grafana/git-ui-sync-demo/commit/1234567890abcdef1234567890abcdef12345678", + "author": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "committer": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "added": [], + "removed": [ + "empty-folder/.keep" + ], + "modified": [] + } +} + diff --git a/apps/provisioning/pkg/repository/github/testdata/webhook-push-keep_file_with_others.json 
b/apps/provisioning/pkg/repository/github/testdata/webhook-push-keep_file_with_others.json new file mode 100644 index 00000000000..9a70ca4997c --- /dev/null +++ b/apps/provisioning/pkg/repository/github/testdata/webhook-push-keep_file_with_others.json @@ -0,0 +1,109 @@ +{ + "ref": "refs/heads/main", + "before": "72096e3adc646c5a5b8a91744f962b12bac06045", + "after": "2345678901bcdef2345678901bcdef2345678901", + "repository": { + "id": 888020043, + "node_id": "R_kgDONO4cSw", + "name": "git-ui-sync-demo", + "full_name": "grafana/git-ui-sync-demo", + "private": true, + "owner": { + "name": "grafana", + "email": "hello@grafana.com", + "login": "grafana", + "id": 7195757, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjcxOTU3NTc=", + "avatar_url": "https://avatars.githubusercontent.com/u/7195757?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/grafana", + "html_url": "https://github.com/grafana", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/grafana/git-ui-sync-demo", + "description": "A repository containing Grafana dashboards to demo the Github Sync feature in Grafana.", + "fork": false, + "url": "https://github.com/grafana/git-ui-sync-demo", + "default_branch": "main", + "master_branch": "main", + "organization": "grafana" + }, + "pusher": { + "name": "testuser", + "email": "test@grafana.com" + }, + "organization": { + "login": "grafana", + "id": 7195757, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjcxOTU3NTc=", + "url": "https://api.github.com/orgs/grafana", + "avatar_url": "https://avatars.githubusercontent.com/u/7195757?v=4" + }, + "sender": { + "login": "testuser", + "id": 123456, + "node_id": "MDQ6VXNlcjEyMzQ1Ng==", + "avatar_url": "https://avatars.githubusercontent.com/u/123456?v=4", + "type": "User", + "site_admin": false + }, + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/grafana/git-ui-sync-demo/compare/72096e3adc64...2345678901bc", + "commits": [ + { + "id": 
"2345678901bcdef2345678901bcdef2345678901", + "tree_id": "bcdef2345678901bcdef2345678901bcdef23456", + "distinct": true, + "message": "Remove folder with .keep and dashboard files", + "timestamp": "2024-12-09T11:00:48+03:00", + "url": "https://github.com/grafana/git-ui-sync-demo/commit/2345678901bcdef2345678901bcdef2345678901", + "author": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "committer": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "added": [], + "removed": [ + "dashboards/.keep", + "dashboards/dashboard1.json", + "dashboards/dashboard2.json" + ], + "modified": [] + } + ], + "head_commit": { + "id": "2345678901bcdef2345678901bcdef2345678901", + "tree_id": "bcdef2345678901bcdef2345678901bcdef23456", + "distinct": true, + "message": "Remove folder with .keep and dashboard files", + "timestamp": "2024-12-09T11:00:48+03:00", + "url": "https://github.com/grafana/git-ui-sync-demo/commit/2345678901bcdef2345678901bcdef2345678901", + "author": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "committer": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "added": [], + "removed": [ + "dashboards/.keep", + "dashboards/dashboard1.json", + "dashboards/dashboard2.json" + ], + "modified": [] + } +} + diff --git a/apps/provisioning/pkg/repository/github/testdata/webhook-push-multiple_keep_files.json b/apps/provisioning/pkg/repository/github/testdata/webhook-push-multiple_keep_files.json new file mode 100644 index 00000000000..cbb82242aa3 --- /dev/null +++ b/apps/provisioning/pkg/repository/github/testdata/webhook-push-multiple_keep_files.json @@ -0,0 +1,109 @@ +{ + "ref": "refs/heads/main", + "before": "72096e3adc646c5a5b8a91744f962b12bac06045", + "after": "3456789012cdef3456789012cdef3456789012cd", + "repository": { + "id": 888020043, + "node_id": "R_kgDONO4cSw", + "name": "git-ui-sync-demo", + "full_name": 
"grafana/git-ui-sync-demo", + "private": true, + "owner": { + "name": "grafana", + "email": "hello@grafana.com", + "login": "grafana", + "id": 7195757, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjcxOTU3NTc=", + "avatar_url": "https://avatars.githubusercontent.com/u/7195757?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/grafana", + "html_url": "https://github.com/grafana", + "type": "Organization", + "site_admin": false + }, + "html_url": "https://github.com/grafana/git-ui-sync-demo", + "description": "A repository containing Grafana dashboards to demo the Github Sync feature in Grafana.", + "fork": false, + "url": "https://github.com/grafana/git-ui-sync-demo", + "default_branch": "main", + "master_branch": "main", + "organization": "grafana" + }, + "pusher": { + "name": "testuser", + "email": "test@grafana.com" + }, + "organization": { + "login": "grafana", + "id": 7195757, + "node_id": "MDEyOk9yZ2FuaXphdGlvbjcxOTU3NTc=", + "url": "https://api.github.com/orgs/grafana", + "avatar_url": "https://avatars.githubusercontent.com/u/7195757?v=4" + }, + "sender": { + "login": "testuser", + "id": 123456, + "node_id": "MDQ6VXNlcjEyMzQ1Ng==", + "avatar_url": "https://avatars.githubusercontent.com/u/123456?v=4", + "type": "User", + "site_admin": false + }, + "created": false, + "deleted": false, + "forced": false, + "base_ref": null, + "compare": "https://github.com/grafana/git-ui-sync-demo/compare/72096e3adc64...3456789012cd", + "commits": [ + { + "id": "3456789012cdef3456789012cdef3456789012cd", + "tree_id": "cdef3456789012cdef3456789012cdef34567890", + "distinct": true, + "message": "Remove multiple folders, some with only .keep files", + "timestamp": "2024-12-09T11:00:48+03:00", + "url": "https://github.com/grafana/git-ui-sync-demo/commit/3456789012cdef3456789012cdef3456789012cd", + "author": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "committer": { + "name": "Test User", + "email": "test@grafana.com", + "username": 
"testuser" + }, + "added": [], + "removed": [ + "empty-folder1/.keep", + "dashboards-to-delete/.keep", + "dashboards-to-delete/dashboard.json" + ], + "modified": [] + } + ], + "head_commit": { + "id": "3456789012cdef3456789012cdef3456789012cd", + "tree_id": "cdef3456789012cdef3456789012cdef34567890", + "distinct": true, + "message": "Remove multiple folders, some with only .keep files", + "timestamp": "2024-12-09T11:00:48+03:00", + "url": "https://github.com/grafana/git-ui-sync-demo/commit/3456789012cdef3456789012cdef3456789012cd", + "author": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "committer": { + "name": "Test User", + "email": "test@grafana.com", + "username": "testuser" + }, + "added": [], + "removed": [ + "empty-folder1/.keep", + "dashboards-to-delete/.keep", + "dashboards-to-delete/dashboard.json" + ], + "modified": [] + } +} + diff --git a/apps/provisioning/pkg/repository/github/webhook.go b/apps/provisioning/pkg/repository/github/webhook.go index 3c37b00cba7..40f87ddc93d 100644 --- a/apps/provisioning/pkg/repository/github/webhook.go +++ b/apps/provisioning/pkg/repository/github/webhook.go @@ -7,6 +7,7 @@ import ( "log/slog" "net/http" "slices" + "strings" "github.com/google/go-github/v70/github" "github.com/google/uuid" @@ -15,6 +16,7 @@ import ( "github.com/grafana/grafana-app-sdk/logging" provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" "github.com/grafana/grafana/apps/provisioning/pkg/repository" + "github.com/grafana/grafana/apps/provisioning/pkg/safepath" common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1" ) @@ -119,13 +121,38 @@ func (r *githubWebhookRepository) parsePushEvent(event *github.PushEvent) (*prov return &provisioning.WebhookResponse{Code: http.StatusOK}, nil } + // whenever possible, we want to do incremental syncs to keep things performant. 
+ // however, if we get an event where just a .keep file is being deleted, and no other files in the folder + // are being deleted, the folder could be gone from git, but not from grafana and we do not have a way + // to get the grafana uid to delete the folder. so, instead, we will queue a full sync to clean things up. + dirsWithKeepDeletes := make(map[string]struct{}) + dirsWithOtherDeletes := make(map[string]struct{}) + for _, change := range event.GetCommits() { + for _, removedFile := range change.Removed { + dir := safepath.Dir(removedFile) + if strings.HasSuffix(removedFile, ".keep") { + dirsWithKeepDeletes[dir] = struct{}{} + } else { + dirsWithOtherDeletes[dir] = struct{}{} + } + } + } + // if there are any keep files deleted that do not have other files deleted in the same folder, we need to queue a full sync + incremental := true + for dir := range dirsWithKeepDeletes { + if _, exists := dirsWithOtherDeletes[dir]; !exists { + incremental = false + break + } + } + return &provisioning.WebhookResponse{ Code: http.StatusAccepted, Job: &provisioning.JobSpec{ Repository: r.config.GetName(), Action: provisioning.JobActionPull, Pull: &provisioning.SyncJobOptions{ - Incremental: true, + Incremental: incremental, }, }, }, nil @@ -274,11 +301,15 @@ func (r *githubWebhookRepository) deleteWebhook(ctx context.Context) error { id := r.config.Status.Webhook.ID err := r.gh.DeleteWebhook(ctx, r.owner, r.repo, id) - if err != nil && !errors.Is(err, ErrResourceNotFound) { + if err != nil && !errors.Is(err, ErrResourceNotFound) && !errors.Is(err, ErrUnauthorized) { return fmt.Errorf("delete webhook: %w", err) } if errors.Is(err, ErrResourceNotFound) { - logger.Info("webhook does not exist", "url", r.config.Status.Webhook.URL, "id", id) + logger.Warn("webhook no longer exists", "url", r.config.Status.Webhook.URL, "id", id) + return nil + } + if errors.Is(err, ErrUnauthorized) { + logger.Warn("webhook deletion failed. 
no longer authorized to delete this webhook", "url", r.config.Status.Webhook.URL, "id", id) return nil } diff --git a/apps/provisioning/pkg/repository/github/webhook_test.go b/apps/provisioning/pkg/repository/github/webhook_test.go index 5021623fe90..5dc23c70e34 100644 --- a/apps/provisioning/pkg/repository/github/webhook_test.go +++ b/apps/provisioning/pkg/repository/github/webhook_test.go @@ -69,6 +69,36 @@ func TestParseWebhooks(t *testing.T) { }, }, }}, + {"push", "keep_file_only", provisioning.WebhookResponse{ + Code: http.StatusAccepted, + Job: &provisioning.JobSpec{ + Repository: "unit-test-repo", + Action: provisioning.JobActionPull, + Pull: &provisioning.SyncJobOptions{ + Incremental: false, + }, + }, + }}, + {"push", "keep_file_with_others", provisioning.WebhookResponse{ + Code: http.StatusAccepted, + Job: &provisioning.JobSpec{ + Repository: "unit-test-repo", + Action: provisioning.JobActionPull, + Pull: &provisioning.SyncJobOptions{ + Incremental: true, + }, + }, + }}, + {"push", "multiple_keep_files", provisioning.WebhookResponse{ + Code: http.StatusAccepted, + Job: &provisioning.JobSpec{ + Repository: "unit-test-repo", + Action: provisioning.JobActionPull, + Pull: &provisioning.SyncJobOptions{ + Incremental: false, + }, + }, + }}, {"issue_comment", "created", provisioning.WebhookResponse{ Code: http.StatusNotImplemented, }}, @@ -1565,6 +1595,32 @@ func TestGitHubRepository_OnDelete(t *testing.T) { // We don't return an error if the webhook is already gone expectedError: nil, }, + { + name: "unauthorized to delete the webhook", + setupMock: func(m *MockClient) { + m.On("DeleteWebhook", mock.Anything, "grafana", "grafana", int64(123)). 
+ Return(ErrUnauthorized) + }, + config: &provisioning.Repository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-repo", + }, + Spec: provisioning.RepositorySpec{ + GitHub: &provisioning.GitHubRepositoryConfig{ + Branch: "main", + }, + }, + Status: provisioning.RepositoryStatus{ + Webhook: &provisioning.WebhookStatus{ + ID: 123, + URL: "https://example.com/webhook", + }, + }, + }, + webhookURL: "https://example.com/webhook", + // We don't return an error if access to the webhook is revoked + expectedError: nil, + }, { name: "no webhook URL provided", setupMock: func(_ *MockClient) {}, diff --git a/apps/provisioning/pkg/repository/tester.go b/apps/provisioning/pkg/repository/tester.go new file mode 100644 index 00000000000..6bd65443d92 --- /dev/null +++ b/apps/provisioning/pkg/repository/tester.go @@ -0,0 +1,84 @@ +package repository + +import ( + "context" + "net/http" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" +) + +// SimpleRepositoryTester will validate the repository configuration, and then proceed to test the connection to the repository +type SimpleRepositoryTester struct { + validator RepositoryValidator +} + +func NewSimpleRepositoryTester(validator RepositoryValidator) SimpleRepositoryTester { + return SimpleRepositoryTester{ + validator: validator, + } +} + +// TestRepository validates the repository and then runs a health check +func (t *SimpleRepositoryTester) TestRepository(ctx context.Context, repo Repository) (*provisioning.TestResults, error) { + errors := t.validator.ValidateRepository(repo) + if len(errors) > 0 { + rsp := &provisioning.TestResults{ + Code: http.StatusUnprocessableEntity, // Invalid + Success: false, + Errors: make([]provisioning.ErrorDetails, len(errors)), + } + for i, err := range errors { + rsp.Errors[i] = provisioning.ErrorDetails{ + Type: metav1.CauseType(err.Type), + Field: 
err.Field, + Detail: err.Detail, + } + } + return rsp, nil + } + + return repo.Test(ctx) +} + +type VerifyAgainstExistingRepositories func(ctx context.Context, cfg *provisioning.Repository) *field.Error // defined this way to prevent an import cycle + +// RepositoryTesterWithExistingChecker will validate the repository configuration, run a health check, and then compare it against existing repositories +type RepositoryTesterWithExistingChecker struct { + tester SimpleRepositoryTester + verify VerifyAgainstExistingRepositories +} + +func NewRepositoryTesterWithExistingChecker(tester SimpleRepositoryTester, verify VerifyAgainstExistingRepositories) RepositoryTesterWithExistingChecker { + return RepositoryTesterWithExistingChecker{ + tester: tester, + verify: verify, + } +} + +// TestRepositoryAndCheckExisting validates the repository, runs a health check, and then compares it against existing repositories +func (c *RepositoryTesterWithExistingChecker) TestRepositoryAndCheckExisting(ctx context.Context, repo Repository) (*provisioning.TestResults, error) { + rsp, err := c.tester.TestRepository(ctx, repo) + if err != nil { + return nil, err + } + + if rsp.Success { + cfg := repo.Config() + if validationErr := c.verify(ctx, cfg); validationErr != nil { + rsp = &provisioning.TestResults{ + Success: false, + Code: http.StatusUnprocessableEntity, + Errors: []provisioning.ErrorDetails{{ + Type: metav1.CauseType(validationErr.Type), + Field: validationErr.Field, + Detail: validationErr.Detail, + }}, + } + } + } + + return rsp, nil +} diff --git a/apps/provisioning/pkg/repository/tester_test.go b/apps/provisioning/pkg/repository/tester_test.go new file mode 100644 index 00000000000..91f29d15ceb --- /dev/null +++ b/apps/provisioning/pkg/repository/tester_test.go @@ -0,0 +1,204 @@ +package repository + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" +) + +func TestTestRepository(t *testing.T) { + tests := []struct { + name string + repository *MockRepository + expectedCode int + expectedErrs []provisioning.ErrorDetails + expectedError error + }{ + { + name: "validation fails", + repository: func() *MockRepository { + m := NewMockRepository(t) + m.On("Config").Return(&provisioning.Repository{ + Spec: provisioning.RepositorySpec{ + // Missing required title + }, + }) + m.On("Validate").Return(field.ErrorList{}) + return m + }(), + expectedCode: http.StatusUnprocessableEntity, + expectedErrs: []provisioning.ErrorDetails{{ + Type: metav1.CauseTypeFieldValueRequired, + Field: "spec.title", + Detail: "a repository title must be given", + }}, + }, + { + name: "test passes", + repository: func() *MockRepository { + m := NewMockRepository(t) + m.On("Config").Return(&provisioning.Repository{ + Spec: provisioning.RepositorySpec{ + Title: "Test Repo", + }, + }) + m.On("Validate").Return(field.ErrorList{}) + m.On("Test", mock.Anything).Return(&provisioning.TestResults{ + Code: http.StatusOK, + Success: true, + }, nil) + return m + }(), + expectedCode: http.StatusOK, + expectedErrs: nil, + }, + { + name: "test fails with error", + repository: func() *MockRepository { + m := NewMockRepository(t) + m.On("Config").Return(&provisioning.Repository{ + Spec: provisioning.RepositorySpec{ + Title: "Test Repo", + }, + }) + m.On("Validate").Return(field.ErrorList{}) + m.On("Test", mock.Anything).Return(nil, fmt.Errorf("test error")) + return m + }(), + expectedError: fmt.Errorf("test error"), + }, + { + name: "test fails with results", + repository: func() *MockRepository { + m := NewMockRepository(t) + m.On("Config").Return(&provisioning.Repository{ + Spec: provisioning.RepositorySpec{ + Title: "Test Repo", + }, + }) + 
m.On("Validate").Return(field.ErrorList{}) + m.On("Test", mock.Anything).Return(&provisioning.TestResults{ + Code: http.StatusBadRequest, + Success: false, + Errors: []provisioning.ErrorDetails{{ + Type: metav1.CauseTypeFieldValueInvalid, + Field: "spec.property", + }}, + }, nil) + return m + }(), + expectedCode: http.StatusBadRequest, + expectedErrs: []provisioning.ErrorDetails{{ + Type: metav1.CauseTypeFieldValueInvalid, + Field: "spec.property", + }}, + }, + } + + tester := NewSimpleRepositoryTester(NewValidator(10*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true)) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + results, err := tester.TestRepository(context.Background(), tt.repository) + + if tt.expectedError != nil { + require.Error(t, err) + require.Equal(t, tt.expectedError.Error(), err.Error()) + return + } + + require.NoError(t, err) + require.NotNil(t, results) + require.Equal(t, tt.expectedCode, results.Code) + + if tt.expectedErrs != nil { + require.Equal(t, tt.expectedErrs, results.Errors) + require.False(t, results.Success) + } else { + require.True(t, results.Success) + require.Empty(t, results.Errors) + } + }) + } +} + +func TestTester_TestRepository(t *testing.T) { + repository := NewMockRepository(t) + repository.On("Config").Return(&provisioning.Repository{ + Spec: provisioning.RepositorySpec{ + Title: "Test Repo", + }, + }) + repository.On("Validate").Return(field.ErrorList{}) + repository.On("Test", mock.Anything).Return(&provisioning.TestResults{ + Code: http.StatusOK, + Success: true, + }, nil) + + tester := NewSimpleRepositoryTester(NewValidator(10*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true)) + results, err := tester.TestRepository(context.Background(), repository) + require.NoError(t, err) + require.NotNil(t, results) + require.Equal(t, http.StatusOK, results.Code) + 
require.True(t, results.Success) +} + +func TestFromFieldError(t *testing.T) { + tests := []struct { + name string + fieldError *field.Error + expectedCode int + expectedField string + expectedType metav1.CauseType + expectedDetail string + }{ + { + name: "required field error", + fieldError: &field.Error{ + Type: field.ErrorTypeRequired, + Field: "spec.title", + Detail: "a repository title must be given", + }, + expectedCode: http.StatusBadRequest, + expectedField: "spec.title", + expectedType: metav1.CauseTypeFieldValueRequired, + expectedDetail: "a repository title must be given", + }, + { + name: "not supported field error", + fieldError: &field.Error{ + Type: field.ErrorTypeNotSupported, + Field: "spec.workflow", + Detail: "branch is only supported on git repositories", + }, + expectedCode: http.StatusBadRequest, + expectedField: "spec.workflow", + expectedType: metav1.CauseTypeFieldValueNotSupported, + expectedDetail: "branch is only supported on git repositories", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := FromFieldError(tt.fieldError) + + require.NotNil(t, result) + require.Equal(t, tt.expectedCode, result.Code) + require.False(t, result.Success) + require.Len(t, result.Errors, 1) + + errorDetail := result.Errors[0] + require.Equal(t, tt.expectedField, errorDetail.Field) + require.Equal(t, tt.expectedType, errorDetail.Type) + require.Equal(t, tt.expectedDetail, errorDetail.Detail) + }) + } +} diff --git a/apps/provisioning/pkg/repository/test.go b/apps/provisioning/pkg/repository/validator.go similarity index 58% rename from apps/provisioning/pkg/repository/test.go rename to apps/provisioning/pkg/repository/validator.go index 4c81013c477..73198f0d7ee 100644 --- a/apps/provisioning/pkg/repository/test.go +++ b/apps/provisioning/pkg/repository/validator.go @@ -1,10 +1,10 @@ package repository import ( - "context" "fmt" "net/http" "slices" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/util/validation/field" @@ -12,57 +12,27 @@ import ( provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" ) -// RepositoryValidator interface for validating repositories against existing ones -type RepositoryValidator interface { - VerifyAgainstExistingRepositories(ctx context.Context, cfg *provisioning.Repository) *field.Error +type RepositoryValidator struct { + allowedTargets []provisioning.SyncTargetType + allowImageRendering bool + minSyncInterval time.Duration } -func TestRepository(ctx context.Context, repo Repository) (*provisioning.TestResults, error) { - return TestRepositoryWithValidator(ctx, repo, nil) +func NewValidator(minSyncInterval time.Duration, allowedTargets []provisioning.SyncTargetType, allowImageRendering bool) RepositoryValidator { + // do not allow minsync interval to be less than 10 + if minSyncInterval <= 10*time.Second { + minSyncInterval = 10 * time.Second + } + + return RepositoryValidator{ + allowedTargets: allowedTargets, + allowImageRendering: allowImageRendering, + minSyncInterval: minSyncInterval, + } } -func TestRepositoryWithValidator(ctx context.Context, repo Repository, validator RepositoryValidator) (*provisioning.TestResults, error) { - errors := ValidateRepository(repo) - if len(errors) > 0 { - rsp := &provisioning.TestResults{ - Code: http.StatusUnprocessableEntity, // Invalid - Success: false, - Errors: make([]provisioning.ErrorDetails, len(errors)), - } - for i, err := range errors { - rsp.Errors[i] = provisioning.ErrorDetails{ - Type: metav1.CauseType(err.Type), - Field: err.Field, - Detail: err.Detail, - } - } - return rsp, nil - } - - rsp, err := repo.Test(ctx) - if err != nil { - return nil, err - } - - if rsp.Success && validator != nil { - cfg := repo.Config() - if validationErr := validator.VerifyAgainstExistingRepositories(ctx, cfg); validationErr != nil { - rsp = &provisioning.TestResults{ - Success: false, - Code: http.StatusUnprocessableEntity, - 
Errors: []provisioning.ErrorDetails{{ - Type: metav1.CauseType(validationErr.Type), - Field: validationErr.Field, - Detail: validationErr.Detail, - }}, - } - } - } - - return rsp, nil -} - -func ValidateRepository(repo Repository) field.ErrorList { +// ValidateRepository solely does configuration checks on the repository object. It does not run a health check or compare against existing repositories. +func (v *RepositoryValidator) ValidateRepository(repo Repository) field.ErrorList { list := repo.Validate() cfg := repo.Config() @@ -70,14 +40,22 @@ func ValidateRepository(repo Repository) field.ErrorList { list = append(list, field.Required(field.NewPath("spec", "title"), "a repository title must be given")) } - if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.Target == "" { - list = append(list, field.Required(field.NewPath("spec", "sync", "target"), - "The target type is required when sync is enabled")) - } + if cfg.Spec.Sync.Enabled { + if cfg.Spec.Sync.Target == "" { + list = append(list, field.Required(field.NewPath("spec", "sync", "target"), + "The target type is required when sync is enabled")) + } else if !slices.Contains(v.allowedTargets, cfg.Spec.Sync.Target) { + list = append(list, + field.Invalid( + field.NewPath("spec", "target"), + cfg.Spec.Sync.Target, + "sync target is not supported")) + } - if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < 10 { - list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"), - cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", 10))) + if cfg.Spec.Sync.IntervalSeconds < int64(v.minSyncInterval.Seconds()) { + list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"), + cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", int64(v.minSyncInterval.Seconds())))) + } } // Reserved names (for now) @@ -136,6 +114,13 @@ func ValidateRepository(repo Repository) field.ErrorList { } } + if !v.allowImageRendering && 
cfg.Spec.GitHub != nil && cfg.Spec.GitHub.GenerateDashboardPreviews { + list = append(list, + field.Invalid(field.NewPath("spec", "generateDashboardPreviews"), + cfg.Spec.GitHub.GenerateDashboardPreviews, + "image rendering is not enabled")) + } + return list } diff --git a/apps/provisioning/pkg/repository/test_test.go b/apps/provisioning/pkg/repository/validator_test.go similarity index 61% rename from apps/provisioning/pkg/repository/test_test.go rename to apps/provisioning/pkg/repository/validator_test.go index 752f28bb979..cb8b726a9bf 100644 --- a/apps/provisioning/pkg/repository/test_test.go +++ b/apps/provisioning/pkg/repository/validator_test.go @@ -1,12 +1,9 @@ package repository import ( - "context" - "fmt" - "net/http" "testing" + "time" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" @@ -83,7 +80,7 @@ func TestValidateRepository(t *testing.T) { Title: "Test Repo", Sync: provisioning.SyncOptions{ Enabled: true, - Target: "test", + Target: provisioning.SyncTargetTypeFolder, IntervalSeconds: 5, }, }, @@ -154,6 +151,27 @@ func TestValidateRepository(t *testing.T) { require.Contains(t, errors.ToAggregate().Error(), "spec.github: Invalid value") }, }, + { + name: "github enabled when image rendering is not allowed", + repository: func() *MockRepository { + m := NewMockRepository(t) + m.On("Config").Return(&provisioning.Repository{ + Spec: provisioning.RepositorySpec{ + Title: "Test Repo", + Type: provisioning.GitHubRepositoryType, + GitHub: &provisioning.GitHubRepositoryConfig{ + GenerateDashboardPreviews: true, + }, + }, + }) + m.On("Validate").Return(field.ErrorList{}) + return m + }(), + expectedErrs: 1, + validateError: func(t *testing.T, errors field.ErrorList) { + require.Contains(t, errors.ToAggregate().Error(), "spec.generateDashboardPreviews: Invalid value") + }, + }, { name: "mismatched git config", repository: func() 
*MockRepository { @@ -185,17 +203,18 @@ func TestValidateRepository(t *testing.T) { Sync: provisioning.SyncOptions{ Enabled: true, IntervalSeconds: 5, + Target: provisioning.SyncTargetTypeInstance, }, }, }) m.On("Validate").Return(field.ErrorList{}) return m }(), - expectedErrs: 4, // Updated from 3 to 4 to match actual errors: + expectedErrs: 4, // 1. missing title // 2. sync target missing - // 3. sync interval too low - // 4. reserved name + // 3. reserved name + // 4. sync target not supported }, { name: "branch workflow for non-github repository", @@ -281,9 +300,10 @@ func TestValidateRepository(t *testing.T) { }, } + validator := NewValidator(10*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder}, false) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - errors := ValidateRepository(tt.repository) + errors := validator.ValidateRepository(tt.repository) require.Len(t, errors, tt.expectedErrs) if tt.validateError != nil { tt.validateError(t, errors) @@ -291,201 +311,3 @@ func TestValidateRepository(t *testing.T) { }) } } - -func TestTestRepository(t *testing.T) { - tests := []struct { - name string - repository *MockRepository - expectedCode int - expectedErrs []provisioning.ErrorDetails - expectedError error - }{ - { - name: "validation fails", - repository: func() *MockRepository { - m := NewMockRepository(t) - m.On("Config").Return(&provisioning.Repository{ - Spec: provisioning.RepositorySpec{ - // Missing required title - }, - }) - m.On("Validate").Return(field.ErrorList{}) - return m - }(), - expectedCode: http.StatusUnprocessableEntity, - expectedErrs: []provisioning.ErrorDetails{{ - Type: metav1.CauseTypeFieldValueRequired, - Field: "spec.title", - Detail: "a repository title must be given", - }}, - }, - { - name: "test passes", - repository: func() *MockRepository { - m := NewMockRepository(t) - m.On("Config").Return(&provisioning.Repository{ - Spec: provisioning.RepositorySpec{ - Title: "Test Repo", - }, - }) - 
m.On("Validate").Return(field.ErrorList{}) - m.On("Test", mock.Anything).Return(&provisioning.TestResults{ - Code: http.StatusOK, - Success: true, - }, nil) - return m - }(), - expectedCode: http.StatusOK, - expectedErrs: nil, - }, - { - name: "test fails with error", - repository: func() *MockRepository { - m := NewMockRepository(t) - m.On("Config").Return(&provisioning.Repository{ - Spec: provisioning.RepositorySpec{ - Title: "Test Repo", - }, - }) - m.On("Validate").Return(field.ErrorList{}) - m.On("Test", mock.Anything).Return(nil, fmt.Errorf("test error")) - return m - }(), - expectedError: fmt.Errorf("test error"), - }, - { - name: "test fails with results", - repository: func() *MockRepository { - m := NewMockRepository(t) - m.On("Config").Return(&provisioning.Repository{ - Spec: provisioning.RepositorySpec{ - Title: "Test Repo", - }, - }) - m.On("Validate").Return(field.ErrorList{}) - m.On("Test", mock.Anything).Return(&provisioning.TestResults{ - Code: http.StatusBadRequest, - Success: false, - Errors: []provisioning.ErrorDetails{{ - Type: metav1.CauseTypeFieldValueInvalid, - Field: "spec.property", - }}, - }, nil) - return m - }(), - expectedCode: http.StatusBadRequest, - expectedErrs: []provisioning.ErrorDetails{{ - Type: metav1.CauseTypeFieldValueInvalid, - Field: "spec.property", - }}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - results, err := TestRepository(context.Background(), tt.repository) - - if tt.expectedError != nil { - require.Error(t, err) - require.Equal(t, tt.expectedError.Error(), err.Error()) - return - } - - require.NoError(t, err) - require.NotNil(t, results) - require.Equal(t, tt.expectedCode, results.Code) - - if tt.expectedErrs != nil { - require.Equal(t, tt.expectedErrs, results.Errors) - require.False(t, results.Success) - } else { - require.True(t, results.Success) - require.Empty(t, results.Errors) - } - }) - } -} - -func TestTester_TestRepository(t *testing.T) { - repository := 
NewMockRepository(t) - repository.On("Config").Return(&provisioning.Repository{ - Spec: provisioning.RepositorySpec{ - Title: "Test Repo", - }, - }) - repository.On("Validate").Return(field.ErrorList{}) - repository.On("Test", mock.Anything).Return(&provisioning.TestResults{ - Code: http.StatusOK, - Success: true, - }, nil) - - results, err := TestRepository(context.Background(), repository) - require.NoError(t, err) - require.NotNil(t, results) - require.Equal(t, http.StatusOK, results.Code) - require.True(t, results.Success) -} - -func TestFromFieldError(t *testing.T) { - tests := []struct { - name string - fieldError *field.Error - expectedCode int - expectedField string - expectedType metav1.CauseType - expectedDetail string - }{ - { - name: "required field error", - fieldError: &field.Error{ - Type: field.ErrorTypeRequired, - Field: "spec.title", - Detail: "a repository title must be given", - }, - expectedCode: http.StatusBadRequest, - expectedField: "spec.title", - expectedType: metav1.CauseTypeFieldValueRequired, - expectedDetail: "a repository title must be given", - }, - { - name: "invalid field error", - fieldError: &field.Error{ - Type: field.ErrorTypeInvalid, - Field: "spec.sync.intervalSeconds", - Detail: "Interval must be at least 10 seconds", - }, - expectedCode: http.StatusBadRequest, - expectedField: "spec.sync.intervalSeconds", - expectedType: metav1.CauseTypeFieldValueInvalid, - expectedDetail: "Interval must be at least 10 seconds", - }, - { - name: "not supported field error", - fieldError: &field.Error{ - Type: field.ErrorTypeNotSupported, - Field: "spec.workflow", - Detail: "branch is only supported on git repositories", - }, - expectedCode: http.StatusBadRequest, - expectedField: "spec.workflow", - expectedType: metav1.CauseTypeFieldValueNotSupported, - expectedDetail: "branch is only supported on git repositories", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := FromFieldError(tt.fieldError) - - 
require.NotNil(t, result) - require.Equal(t, tt.expectedCode, result.Code) - require.False(t, result.Success) - require.Len(t, result.Errors, 1) - - errorDetail := result.Errors[0] - require.Equal(t, tt.expectedField, errorDetail.Field) - require.Equal(t, tt.expectedType, errorDetail.Type) - require.Equal(t, tt.expectedDetail, errorDetail.Detail) - }) - } -} diff --git a/apps/secret/go.mod b/apps/secret/go.mod index aff8db62196..b035b0c963a 100644 --- a/apps/secret/go.mod +++ b/apps/secret/go.mod @@ -7,7 +7,7 @@ require ( github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250710134100-1f3dc0533caf github.com/stretchr/testify v1.11.1 google.golang.org/grpc v1.75.1 - google.golang.org/protobuf v1.36.8 + google.golang.org/protobuf v1.36.9 gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.34.1 k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 diff --git a/apps/secret/go.sum b/apps/secret/go.sum index d67f7a6842f..4cf7b9857ff 100644 --- a/apps/secret/go.sum +++ b/apps/secret/go.sum @@ -170,8 +170,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/secret/pkg/apis/secret/v1beta1/keeper_client_gen.go b/apps/secret/pkg/apis/secret/v1beta1/keeper_client_gen.go new file mode 100644 index 00000000000..c54e3d6f682 --- /dev/null +++ b/apps/secret/pkg/apis/secret/v1beta1/keeper_client_gen.go @@ -0,0 +1,99 @@ +package v1beta1 + +import ( + "context" + + "github.com/grafana/grafana-app-sdk/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type KeeperClient struct { + client *resource.TypedClient[*Keeper, *KeeperList] +} + +func NewKeeperClient(client resource.Client) *KeeperClient { + return &KeeperClient{ + client: resource.NewTypedClient[*Keeper, *KeeperList](client, KeeperKind()), + } +} + +func NewKeeperClientFromGenerator(generator resource.ClientGenerator) (*KeeperClient, error) { + c, err := generator.ClientFor(KeeperKind()) + if err != nil { + return nil, err + } + return NewKeeperClient(c), nil +} + +func (c *KeeperClient) Get(ctx context.Context, identifier resource.Identifier) (*Keeper, error) { + return c.client.Get(ctx, identifier) +} + +func (c *KeeperClient) List(ctx context.Context, namespace string, opts resource.ListOptions) (*KeeperList, error) { + return c.client.List(ctx, namespace, opts) +} + +func (c *KeeperClient) ListAll(ctx context.Context, namespace string, opts resource.ListOptions) (*KeeperList, error) { + resp, err := c.client.List(ctx, namespace, resource.ListOptions{ + ResourceVersion: opts.ResourceVersion, + Limit: opts.Limit, + LabelFilters: opts.LabelFilters, + FieldSelectors: opts.FieldSelectors, + }) + if err != nil { + return nil, err + } + for resp.GetContinue() != "" { + page, err := c.client.List(ctx, namespace, resource.ListOptions{ + Continue: resp.GetContinue(), + ResourceVersion: opts.ResourceVersion, + Limit: opts.Limit, + LabelFilters: opts.LabelFilters, + FieldSelectors: opts.FieldSelectors, + }) + if err != nil { + return nil, err + } + 
resp.SetContinue(page.GetContinue()) + resp.SetResourceVersion(page.GetResourceVersion()) + resp.SetItems(append(resp.GetItems(), page.GetItems()...)) + } + return resp, nil +} + +func (c *KeeperClient) Create(ctx context.Context, obj *Keeper, opts resource.CreateOptions) (*Keeper, error) { + // Make sure apiVersion and kind are set + obj.APIVersion = GroupVersion.Identifier() + obj.Kind = KeeperKind().Kind() + return c.client.Create(ctx, obj, opts) +} + +func (c *KeeperClient) Update(ctx context.Context, obj *Keeper, opts resource.UpdateOptions) (*Keeper, error) { + return c.client.Update(ctx, obj, opts) +} + +func (c *KeeperClient) Patch(ctx context.Context, identifier resource.Identifier, req resource.PatchRequest, opts resource.PatchOptions) (*Keeper, error) { + return c.client.Patch(ctx, identifier, req, opts) +} + +func (c *KeeperClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus KeeperStatus, opts resource.UpdateOptions) (*Keeper, error) { + return c.client.Update(ctx, &Keeper{ + TypeMeta: metav1.TypeMeta{ + Kind: KeeperKind().Kind(), + APIVersion: GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: opts.ResourceVersion, + Namespace: identifier.Namespace, + Name: identifier.Name, + }, + Status: newStatus, + }, resource.UpdateOptions{ + Subresource: "status", + ResourceVersion: opts.ResourceVersion, + }) +} + +func (c *KeeperClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { + return c.client.Delete(ctx, identifier, opts) +} diff --git a/apps/secret/pkg/apis/secret/v1beta1/securevalue_client_gen.go b/apps/secret/pkg/apis/secret/v1beta1/securevalue_client_gen.go new file mode 100644 index 00000000000..241d4cac3c2 --- /dev/null +++ b/apps/secret/pkg/apis/secret/v1beta1/securevalue_client_gen.go @@ -0,0 +1,99 @@ +package v1beta1 + +import ( + "context" + + "github.com/grafana/grafana-app-sdk/resource" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type SecureValueClient struct { + client *resource.TypedClient[*SecureValue, *SecureValueList] +} + +func NewSecureValueClient(client resource.Client) *SecureValueClient { + return &SecureValueClient{ + client: resource.NewTypedClient[*SecureValue, *SecureValueList](client, SecureValueKind()), + } +} + +func NewSecureValueClientFromGenerator(generator resource.ClientGenerator) (*SecureValueClient, error) { + c, err := generator.ClientFor(SecureValueKind()) + if err != nil { + return nil, err + } + return NewSecureValueClient(c), nil +} + +func (c *SecureValueClient) Get(ctx context.Context, identifier resource.Identifier) (*SecureValue, error) { + return c.client.Get(ctx, identifier) +} + +func (c *SecureValueClient) List(ctx context.Context, namespace string, opts resource.ListOptions) (*SecureValueList, error) { + return c.client.List(ctx, namespace, opts) +} + +func (c *SecureValueClient) ListAll(ctx context.Context, namespace string, opts resource.ListOptions) (*SecureValueList, error) { + resp, err := c.client.List(ctx, namespace, resource.ListOptions{ + ResourceVersion: opts.ResourceVersion, + Limit: opts.Limit, + LabelFilters: opts.LabelFilters, + FieldSelectors: opts.FieldSelectors, + }) + if err != nil { + return nil, err + } + for resp.GetContinue() != "" { + page, err := c.client.List(ctx, namespace, resource.ListOptions{ + Continue: resp.GetContinue(), + ResourceVersion: opts.ResourceVersion, + Limit: opts.Limit, + LabelFilters: opts.LabelFilters, + FieldSelectors: opts.FieldSelectors, + }) + if err != nil { + return nil, err + } + resp.SetContinue(page.GetContinue()) + resp.SetResourceVersion(page.GetResourceVersion()) + resp.SetItems(append(resp.GetItems(), page.GetItems()...)) + } + return resp, nil +} + +func (c *SecureValueClient) Create(ctx context.Context, obj *SecureValue, opts resource.CreateOptions) (*SecureValue, error) { + // Make sure apiVersion and kind are set + obj.APIVersion = 
GroupVersion.Identifier() + obj.Kind = SecureValueKind().Kind() + return c.client.Create(ctx, obj, opts) +} + +func (c *SecureValueClient) Update(ctx context.Context, obj *SecureValue, opts resource.UpdateOptions) (*SecureValue, error) { + return c.client.Update(ctx, obj, opts) +} + +func (c *SecureValueClient) Patch(ctx context.Context, identifier resource.Identifier, req resource.PatchRequest, opts resource.PatchOptions) (*SecureValue, error) { + return c.client.Patch(ctx, identifier, req, opts) +} + +func (c *SecureValueClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus SecureValueStatus, opts resource.UpdateOptions) (*SecureValue, error) { + return c.client.Update(ctx, &SecureValue{ + TypeMeta: metav1.TypeMeta{ + Kind: SecureValueKind().Kind(), + APIVersion: GroupVersion.Identifier(), + }, + ObjectMeta: metav1.ObjectMeta{ + ResourceVersion: opts.ResourceVersion, + Namespace: identifier.Namespace, + Name: identifier.Name, + }, + Status: newStatus, + }, resource.UpdateOptions{ + Subresource: "status", + ResourceVersion: opts.ResourceVersion, + }) +} + +func (c *SecureValueClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error { + return c.client.Delete(ctx, identifier, opts) +} diff --git a/apps/secret/pkg/apis/secret_manifest.go b/apps/secret/pkg/apis/secret_manifest.go index 512e381f77d..e83afe68aee 100644 --- a/apps/secret/pkg/apis/secret_manifest.go +++ b/apps/secret/pkg/apis/secret_manifest.go @@ -11,13 +11,16 @@ import ( "github.com/grafana/grafana-app-sdk/app" "github.com/grafana/grafana-app-sdk/resource" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kube-openapi/pkg/spec3" v1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1" ) var appManifestData = app.ManifestData{ - AppName: "secret", - Group: "secret.grafana.app", + AppName: "secret", + Group: "secret.grafana.app", + PreferredVersion: "v1beta1", Versions: []app.ManifestVersion{ { Name: "v1beta1", @@ 
-37,6 +40,10 @@ var appManifestData = app.ManifestData{ Conversion: false, }, }, + Routes: app.ManifestVersionRoutes{ + Namespaced: map[string]spec3.PathProps{}, + Cluster: map[string]spec3.PathProps{}, + }, }, }, } @@ -66,6 +73,7 @@ var customRouteToGoResponseType = map[string]any{} // ManifestCustomRouteResponsesAssociator returns the associated response go type for a given kind, version, custom route path, and method, if one exists. // kind may be empty for custom routes which are not kind subroutes. Leading slashes are removed from subroute paths. // If there is no association for the provided kind, version, custom route path, and method, exists will return false. +// Resource routes (those without a kind) should prefix their route with "/" if the route is namespaced (otherwise the route is assumed to be cluster-scope) func ManifestCustomRouteResponsesAssociator(kind, version, path, verb string) (goType any, exists bool) { if len(path) > 0 && path[0] == '/' { path = path[1:] @@ -73,3 +81,42 @@ func ManifestCustomRouteResponsesAssociator(kind, version, path, verb string) (g goType, exists = customRouteToGoResponseType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))] return goType, exists } + +var customRouteToGoParamsType = map[string]runtime.Object{} + +func ManifestCustomRouteQueryAssociator(kind, version, path, verb string) (goType runtime.Object, exists bool) { + if len(path) > 0 && path[0] == '/' { + path = path[1:] + } + goType, exists = customRouteToGoParamsType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))] + return goType, exists +} + +var customRouteToGoRequestBodyType = map[string]any{} + +func ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb string) (goType any, exists bool) { + if len(path) > 0 && path[0] == '/' { + path = path[1:] + } + goType, exists = customRouteToGoRequestBodyType[fmt.Sprintf("%s|%s|%s|%s", version, kind, path, strings.ToUpper(verb))] + return goType, exists +} + 
+type GoTypeAssociator struct{} + +func NewGoTypeAssociator() *GoTypeAssociator { + return &GoTypeAssociator{} +} + +func (g *GoTypeAssociator) KindToGoType(kind, version string) (goType resource.Kind, exists bool) { + return ManifestGoTypeAssociator(kind, version) +} +func (g *GoTypeAssociator) CustomRouteReturnGoType(kind, version, path, verb string) (goType any, exists bool) { + return ManifestCustomRouteResponsesAssociator(kind, version, path, verb) +} +func (g *GoTypeAssociator) CustomRouteQueryGoType(kind, version, path, verb string) (goType runtime.Object, exists bool) { + return ManifestCustomRouteQueryAssociator(kind, version, path, verb) +} +func (g *GoTypeAssociator) CustomRouteRequestBodyGoType(kind, version, path, verb string) (goType any, exists bool) { + return ManifestCustomRouteRequestBodyAssociator(kind, version, path, verb) +} diff --git a/apps/shorturl/go.mod b/apps/shorturl/go.mod index b74ce93065c..04be420b54a 100644 --- a/apps/shorturl/go.mod +++ b/apps/shorturl/go.mod @@ -33,8 +33,8 @@ require ( github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -84,7 +84,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + 
google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.34.1 // indirect diff --git a/apps/shorturl/go.sum b/apps/shorturl/go.sum index ec3c417fa86..58be81e02af 100644 --- a/apps/shorturl/go.sum +++ b/apps/shorturl/go.sum @@ -48,10 +48,10 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk v0.46.0 h1:gvzQvCQgZJ/73BfAcbDt/6TAMhnVikVPxZt/UwDl+oc= @@ -217,8 +217,8 @@ google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/apps/shorturl/kinds/shorturl.cue b/apps/shorturl/kinds/shorturl.cue index e1a242399e5..ea234173776 100644 --- a/apps/shorturl/kinds/shorturl.cue +++ b/apps/shorturl/kinds/shorturl.cue @@ -20,7 +20,7 @@ shorturl: { response: { url: string } - responseMetadata: typeMeta: false // Don't include kube typeMeta + responseMetadata: typeMeta: false } } } diff --git a/apps/shorturl/pkg/apis/shorturl/v1alpha1/shorturl_getgoto_types_gen.go b/apps/shorturl/pkg/apis/shorturl/v1alpha1/shorturl_getgoto_response_types_gen.go similarity index 100% rename from apps/shorturl/pkg/apis/shorturl/v1alpha1/shorturl_getgoto_types_gen.go rename to apps/shorturl/pkg/apis/shorturl/v1alpha1/shorturl_getgoto_response_types_gen.go diff --git a/apps/shorturl/pkg/apis/shorturl_manifest.go b/apps/shorturl/pkg/apis/shorturl_manifest.go index 5d5b718a681..c4446d924e9 100644 --- a/apps/shorturl/pkg/apis/shorturl_manifest.go +++ b/apps/shorturl/pkg/apis/shorturl_manifest.go @@ -26,8 +26,9 @@ var ( ) var 
appManifestData = app.ManifestData{ - AppName: "shorturl", - Group: "shorturl.grafana.app", + AppName: "shorturl", + Group: "shorturl.grafana.app", + PreferredVersion: "v1alpha1", Versions: []app.ManifestVersion{ { Name: "v1alpha1", @@ -52,7 +53,7 @@ var appManifestData = app.ManifestData{ Get: &spec3.Operation{ OperationProps: spec3.OperationProps{ - OperationId: "GetGoto", + OperationId: "getGoto", Responses: &spec3.Responses{ ResponsesProps: spec3.ResponsesProps{ diff --git a/conf/defaults.ini b/conf/defaults.ini index c7044021cd1..1c47f30e6f3 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -2148,24 +2148,6 @@ show_ui = true # Disables TLS in the secure socks proxy allow_insecure = false -################################## Feature Management ############################################## -# Options to configure the experimental Feature Toggle Admin Page feature, which is behind the `featureToggleAdminPage` feature toggle. Use at your own risk. -[feature_management] -# Allows editing of feature toggles in the feature management page -allow_editing = false - -# Allow customization of URL for the controller that manages feature toggles -update_webhook = - -# Allow configuring an auth token for feature management update requests -update_webhook_token = - -# Hides specific feature toggles from the feature management page -hidden_toggles = - -# Disables updating specific feature toggles in the feature management page -read_only_toggles = - #################################### Public Dashboards ##################################### [public_dashboards] # Set to false to disable public dashboards @@ -2247,3 +2229,8 @@ allowed_targets = instance|folder # Whether image rendering is allowed for dashboard previews. # Requires image rendering service to be configured. allow_image_rendering = true + +# The minimum sync interval that can be set for a repository. 
This is how often the controller +# will check if there has been any changes to the repository not propagated by a webhook. +# The minimum value is 10 seconds. +min_sync_interval = 10s diff --git a/conf/sample.ini b/conf/sample.ini index eec89e05b73..964b3df5196 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -2050,20 +2050,6 @@ default_datasource_uid = ; show_ui = true ; allow_insecure = false -################################## Feature Management ############################################## -[feature_management] -# Options to configure the experimental Feature Toggle Admin Page feature, which is behind the `featureToggleAdminPage` feature toggle. Use at your own risk. -# Allow editing of feature toggles in the feature management page -;allow_editing = false -# Allow customization of URL for the controller that manages feature toggles -;update_webhook = -# Allow configuring an auth token for feature management update requests -;update_webhook_token = -# Hide specific feature toggles from the feature management page -;hidden_toggles = -# Disable updating specific feature toggles in the feature management page -;read_only_toggles = - #################################### Public Dashboards ##################################### [public_dashboards] # Set to false to disable public dashboards diff --git a/docs/sources/alerting/alerting-rules/templates/_index.md b/docs/sources/alerting/alerting-rules/templates/_index.md index a2afde85de7..f66781f01e5 100644 --- a/docs/sources/alerting/alerting-rules/templates/_index.md +++ b/docs/sources/alerting/alerting-rules/templates/_index.md @@ -220,6 +220,24 @@ To preview label values, select `Use notification policy`, and then click on `Pr {{< figure src="/media/docs/alerting/alert-instance-routing-preview.png" max-width="1200px" alt="Routing preview displays label values" >}} +## Grafana Cloud AI-generated templates + +Grafana Cloud users can use built-in AI tool to generate templates in the appropriate [alerting template 
language](/docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/template-notifications/language/) for you. + +To use AI to create your template, follow these steps: + +1. Go to **Alerting -> Contact points**. + +1. Click the Notification Templates tab, then click the **+ Add notification template group** button. + +1. Name your template. + +1. In the Template group section, click the **Generate with AI** button. + +1. Supply the AI tool with a prompt or select from one of the example prompts and edit that if necessary. + +1. Click **Save**. + ## More information For further details on how to template alert rules, refer to: diff --git a/docs/sources/alerting/fundamentals/alert-rule-evaluation/nodata-and-error-states.md b/docs/sources/alerting/fundamentals/alert-rule-evaluation/nodata-and-error-states.md index f91f9d9f000..50182bd520a 100644 --- a/docs/sources/alerting/fundamentals/alert-rule-evaluation/nodata-and-error-states.md +++ b/docs/sources/alerting/fundamentals/alert-rule-evaluation/nodata-and-error-states.md @@ -106,7 +106,7 @@ A Grafana-managed alert instance can be in any of the following states, dependin The **Error** state is triggered when the alert rule fails to evaluate its query or queries successfully. -This can occur due to evaluation timeouts (default: `30s`) or three repeated failures when querying the data source. The [`evaluation_timeout`](ref:evaluation_timeout) and [`max_attempts`](ref:max_attempts) options control these settings. +This can occur due to evaluation timeouts (default: `30s`) or repeated failures (default: `3`) when querying the data source. The [`evaluation_timeout`](ref:evaluation_timeout) and [`max_attempts`](ref:max_attempts) options control these settings. When an alert instance enters the **Error** state, Grafana, by default, triggers a new [`DatasourceError` alert](#no-data-and-error-alerts).
You can control this behavior based on the desired outcome of your alert rule in [Modify the `No Data` or `Error` state](#modify-the-no-data-or-error-state). @@ -157,10 +157,10 @@ To minimize the number of **No Data** or **Error** state alerts received, try th To minimize timeouts resulting in the **Error** state, reduce the time range to request less data every evaluation cycle. -1. Change the default [evaluation time out](ref:evaluation_timeout). The default is set at 30 seconds. To increase the default evaluation timeout, open a support ticket from the [Cloud Portal](https://grafana.com/docs/grafana-cloud/account-management/support/#grafana-cloud-support-options). Note that this should be a last resort, because it may affect the performance of all alert rules and cause missed evaluations if the timeout is too long. - 1. To reduce multiple notifications from **Error** alerts, define a [notification policy](ref:notification-policies) to handle all related alerts with `alertname=DatasourceError`, and filter and group errors from the same data source using the `datasource_uid` label. +1. Change the [evaluation timeout](ref:evaluation_timeout) (default: `30s`) or the [retry mechanism (`max_attempts`)](ref:max_attempts) settings. This should be a last resort, as it can affect the performance of all alert rules and may cause missed evaluations if the timeout is too long. For Grafana Cloud, open a support ticket from the [Cloud Portal](https://grafana.com/docs/grafana-cloud/account-management/support/#grafana-cloud-support-options). + {{< admonition type="tip" >}} For common examples and practical guidance on handling **Error**, **No Data**, and **stale** alert scenarios, refer to the [Handle connectivity errors](ref:guide-connectivity-errors) and [Handle missing data](ref:guide-missing-data) guides. 
{{< /admonition >}} diff --git a/docs/sources/alerting/set-up/performance-limitations/index.md b/docs/sources/alerting/set-up/performance-limitations/index.md index 592504d9a91..82e8ccd41f1 100644 --- a/docs/sources/alerting/set-up/performance-limitations/index.md +++ b/docs/sources/alerting/set-up/performance-limitations/index.md @@ -58,20 +58,20 @@ For more information, refer to [this GitHub issue](https://github.com/grafana/gr ## High load on database caused by a high number of alert instances -If you have a high number of alert instances, it can happen that the load on the database gets very high, as each state -transition of an alert instance is saved in the database after every evaluation. +If you have a high number of alert rules or alert instances, the load on the database can get very high. -### Compressed alert state +By default, Grafana performs one SQL update per alert rule after each evaluation, which updates all alert instances belonging to the rule. -When the `alertingSaveStateCompressed` feature toggle is enabled, Grafana saves the alert rule state in a compressed form. Instead of performing an individual SQL update for each alert instance, Grafana performs a single SQL update per alert rule, updating all alert instances belonging to that rule. - -This can significantly reduce database overhead for alert rules with many alert instances. +You can change this behavior by disabling the `alertingSaveStateCompressed` feature flag. In this case, Grafana performs a separate SQL update for each state change of an alert instance. This configuration is rarely recommended, as it can add significant database overhead for alert rules with many instances. ### Save state periodically -High load can be also prevented by writing to the database periodically, instead of after every evaluation. +You can also reduce database load by writing states periodically instead of after every evaluation. 
-To save state periodically, enable the `alertingSaveStatePeriodic` feature toggle. +To save state periodically: + +1. Enable the `alertingSaveStatePeriodic` feature toggle. +1. Disable the `alertingSaveStateCompressed` feature toggle. By default, it saves the states every 5 minutes to the database and on each shutdown. The periodic interval can also be configured using the `state_periodic_save_interval` configuration flag. During this process, Grafana deletes all existing alert instances from the database and then writes the entire current set of instances back in batches in a single transaction. diff --git a/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md b/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md index ea79a3686c6..dbe77c05013 100644 --- a/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md +++ b/docs/sources/dashboards/build-dashboards/modify-dashboard-settings/index.md @@ -10,7 +10,6 @@ labels: products: - cloud - enterprise - oss -menuTitle: Modify dashboard settings title: Modify dashboard settings description: Manage and edit your dashboard settings weight: 8 @@ -56,7 +55,7 @@ To access the dashboard setting page: Adjust dashboard time settings when you want to change the dashboard timezone, the local browser time, and specify auto-refresh time intervals. -1. On the **Settings** page, scroll down to the **Time Options** section of the **General** tab. +1. On the **General** tab of the **Settings** page, scroll down to the **Time options** section. 1. Specify time settings as follows. - **Time zone:** Specify the local time zone of the service or system that you are monitoring. This can be helpful when monitoring a system or service that operates across several time zones. - **Default:** Grafana uses the default selected time zone for the user profile, team, or organization.
If no time zone is specified for the user profile, a team the user is a member of, or the organization, then Grafana uses the local browser time. @@ -71,6 +70,21 @@ Adjust dashboard time settings when you want to change the dashboard timezone, t 1. Click **Save**. 1. Click **Exit edit**. +## Modify graph tooltip behavior + +Use this option to control tooltip and hover highlight behavior across graph panels (for example, time series). + +1. On the **General** tab of the **Settings** page, scroll down to the **Panel options** section. +1. Choose from the following options to control the tooltip and hover highlight behavior across graph panels: + - **Default** - Tooltip and hover highlight behavior isn't shared across panels. + - **Shared crosshair** - When you hover the cursor over one graph panel in the dashboard, the crosshair is also displayed on all other graph panels in the dashboard. + - **Shared tooltip** - When you hover the cursor over one graph panel in the dashboard, the crosshair and tooltips are also displayed on all other graph panels in the dashboard. + +1. Click **Save dashboard**. +1. (Optional) Enter a description of the changes you've made. +1. Click **Save**. +1. Click **Exit edit**. + ## Add tags You can add metadata to your dashboards using tags. Tags also give you the ability to filter the list of dashboards. @@ -79,7 +93,7 @@ Tags can be up to 50 characters long, including spaces. To add tags to a dashboard, follow these steps: -1. On the **Settings** page, scroll down to the **Tags** section of the **General** tab. +1. On the **General** tab of the **Settings** page, scroll down to the **Tags** section. 1. In the field, enter a new or existing tag. If you're entering an existing tag, make sure that you spell it the same way or a new tag is created.
diff --git a/docs/sources/dashboards/share-dashboards-panels/shared-dashboards/index.md b/docs/sources/dashboards/share-dashboards-panels/shared-dashboards/index.md index 5518ef51146..9ea6df4e47e 100644 --- a/docs/sources/dashboards/share-dashboards-panels/shared-dashboards/index.md +++ b/docs/sources/dashboards/share-dashboards-panels/shared-dashboards/index.md @@ -156,7 +156,7 @@ On this screen, you can see: - The earliest time a user has been active in a dashboard - When they last accessed a shared dashboard -- The dashboards to they have access +- The dashboards they have access to - Their role You can also revoke a user's access to all shared dashboards on from this tab. diff --git a/docs/sources/developers/plugins/plugin.schema.json b/docs/sources/developers/plugins/plugin.schema.json index 3645aed2ef4..8cb3954f13d 100644 --- a/docs/sources/developers/plugins/plugin.schema.json +++ b/docs/sources/developers/plugins/plugin.schema.json @@ -688,7 +688,7 @@ }, "languages": { "type": "array", - "description": "The list of languages supported by the plugin. Each entry should be a locale identifier in the format `language-COUNTRY` (for example `en-US`, `fr-FR`, `es-ES`).", + "description": "The list of languages supported by the plugin. Each entry should be a locale identifier in the format `language-COUNTRY` (for example `en-US`, `es-ES`, `de-DE`).", "items": { "type": "string" } diff --git a/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md b/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md index bb8de19264f..fda106213f9 100644 --- a/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md +++ b/docs/sources/panels-visualizations/query-transform-data/transform-data/index.md @@ -452,24 +452,28 @@ This transformation is very useful if your data source does not natively filter The available conditions for all fields are: -- **Regex** - Match a regex expression. 
- **Is Null** - Match if the value is null. - **Is Not Null** - Match if the value is not null. - **Equal** - Match if the value is equal to the specified value. -- **Different** - Match if the value is different than the specified value. +- **Not Equal** - Match if the value is not equal to the specified value. +- **Regex** - Match a regex expression. The available conditions for string fields are: - **Contains substring** - Match if the value contains the specified substring (case insensitive). - **Does not contain substring** - Match if the value doesn't contain the specified substring (case insensitive). -The available conditions for number and time fields are: +The available conditions for number fields are: - **Greater** - Match if the value is greater than the specified value. - **Lower** - Match if the value is lower than the specified value. - **Greater or equal** - Match if the value is greater or equal. - **Lower or equal** - Match if the value is lower or equal. -- **Range** - Match a range between a specified minimum and maximum, min and max included. A time field will pre-populate with variables to filter by selected time. +- **In between** - Match a range between a specified minimum and maximum, min and max included. + +The available conditions for time fields are: + +- **In between** - Match a range between a specified minimum and maximum. The min and max values will pre-populate with variables to filter by selected time. Consider the following dataset: diff --git a/docs/sources/panels-visualizations/visualizations/node-graph/index.md b/docs/sources/panels-visualizations/visualizations/node-graph/index.md index 577794bf65c..a18b7349310 100644 --- a/docs/sources/panels-visualizations/visualizations/node-graph/index.md +++ b/docs/sources/panels-visualizations/visualizations/node-graph/index.md @@ -238,6 +238,6 @@ Optional fields: | arc\_\_\* | number | Any field prefixed with `arc__` will be used to create the color circle around the node. 
All values in these fields should add up to 1. You can specify color using `config.color.fixedColor`. | | detail\_\_\* | string/number | Any field prefixed with `detail__` will be shown in the header of context menu when clicked on the node. Use `config.displayName` for more human readable label. | | color | string/number | Can be used to specify a single color instead of using the `arc__` fields to specify color sections. It can be either a string which should then be an acceptable HTML color string or it can be a number in which case the behavior depends on `field.config.color.mode` setting. This can be for example used to create gradient colors controlled by the field value. | -| icon | string | Name of the icon to show inside the node instead of the default stats. Only Grafana [built in icons](https://developers.grafana.com/ui/latest/index.html?path=/story/docs-overview-icon--icons-overview)) are allowed. | +| icon | string | Name of the icon to show inside the node instead of the default stats. Only Grafana [built in icons](https://developers.grafana.com/ui/latest/index.html?path=/story/iconography-icon--icons-overview)) are allowed. | | nodeRadius | number | Radius value in pixels. Used to manage node size. | | highlighted | boolean | Sets whether the node should be highlighted. Useful for example to represent a specific path in the graph by highlighting several nodes and edges. Default: `false` | diff --git a/docs/sources/setup-grafana/configure-grafana/_index.md b/docs/sources/setup-grafana/configure-grafana/_index.md index a353e49f048..9702851cd19 100644 --- a/docs/sources/setup-grafana/configure-grafana/_index.md +++ b/docs/sources/setup-grafana/configure-grafana/_index.md @@ -1915,7 +1915,40 @@ The timeout string is a possibly signed sequence of decimal numbers, followed by #### `max_attempts` -Sets a maximum number of times Grafana attempts to evaluate an alert rule before giving up on that evaluation. The default value is `3`. 
+The maximum number of times Grafana retries evaluating an alert rule before giving up on that evaluation. Default is `3`. + +The retry mechanism: + +- Adds jitter to retry delays to prevent thundering herd problems when multiple rules fail simultaneously. +- Stops when either `max_attempts` is reached or the rule’s evaluation interval is exceeded. + +You can customize retry behavior with `initial_retry_delay`, `max_retry_delay`, and `randomization_factor`. + +#### `initial_retry_delay` + +The initial delay before retrying a failed alert evaluation. Default is `1s`. + +This value is the starting point for exponential backoff. + +#### `max_retry_delay` + +The maximum delay between retries during exponential backoff. Default is `4s`. + +After the retry delay reaches `max_retry_delay`, all subsequent retries use this delay. + +To avoid overlapping retries with scheduled evaluations, `max_retry_delay` must be less than the rule’s evaluation interval. + +#### `randomization_factor` + +The randomization factor for exponential backoff retries. Default is `0.1`. + +The value must be between `0` and `1`.
+ +The actual retry delay is chosen randomly between: + +``` +[current_delay*(1-randomization_factor), current_delay*(1+randomization_factor)] +``` #### `min_interval` diff --git a/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md b/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md index f387da5857b..e8b12113400 100644 --- a/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md +++ b/docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md @@ -101,7 +101,7 @@ Most [generally available](https://grafana.com/docs/release-life-cycle/#general- | `pdfTables` | Enables generating table data as PDF in reporting | | `canvasPanelPanZoom` | Allow pan and zoom in canvas panel | | `regressionTransformation` | Enables regression analysis transformation | -| `alertingSaveStateCompressed` | Enables the compressed protobuf-based alert state storage | +| `alertingSaveStateCompressed` | Enables the compressed protobuf-based alert state storage. Default is enabled. | | `sqlExpressions` | Enables SQL Expressions, which can execute SQL queries against data source results. 
| | `queryLibrary` | Enables Saved queries (query library) feature | | `enableSCIM` | Enables SCIM support for user and group management | diff --git a/docs/sources/setup-grafana/configure-security/configure-authentication/azuread/index.md b/docs/sources/setup-grafana/configure-security/configure-authentication/azuread/index.md index f1d507bd526..6abdfb93dd4 100644 --- a/docs/sources/setup-grafana/configure-security/configure-authentication/azuread/index.md +++ b/docs/sources/setup-grafana/configure-security/configure-authentication/azuread/index.md @@ -12,14 +12,14 @@ labels: - cloud - enterprise - oss -menuTitle: Azure AD/Entra ID OAuth -title: Configure Azure AD/Entra ID OAuth authentication +menuTitle: Entra ID OAuth +title: Configure Entra ID OAuth authentication weight: 800 --- -# Configure Azure AD/Entra ID OAuth authentication +# Configure Entra ID OAuth authentication -The Azure AD authentication allows you to use a Microsoft Entra ID (formerly known as Azure Active Directory) tenant as an identity provider for Grafana. You can use Entra ID application roles to assign users and groups to Grafana roles from the Azure Portal. +The Entra ID authentication allows you to use a Microsoft Entra ID (formerly known as Azure Active Directory) tenant as an identity provider for Grafana. You can use Entra ID application roles to assign users and groups to Grafana roles from the Azure Portal. {{< admonition type="note" >}} If Users use the same email address in Microsoft Entra ID that they use with other authentication providers (such as Grafana.com), you need to do additional configuration to ensure that the users are matched correctly. Please refer to [Using the same email address to login with different identity providers](../#using-the-same-email-address-to-login-with-different-identity-providers) for more information. 
@@ -27,7 +27,7 @@ If Users use the same email address in Microsoft Entra ID that they use with oth ## Create the Microsoft Entra ID application -To enable the Azure AD/Entra ID OAuth, register your application with Entra ID. +To enable the Entra ID OAuth, register your application with Entra ID. 1. Log in to [Azure Portal](https://portal.azure.com), then click **Microsoft Entra ID** in the side menu. @@ -119,7 +119,7 @@ To enable the Azure AD/Entra ID OAuth, register your application with Entra ID. 1. Click **Add user/group** to add a user or group to the Grafana roles. {{< admonition type="note" >}} -When assigning a group to a Grafana role, ensure that users are direct members of the group. Users in nested groups will not have access to Grafana due to limitations within Azure AD/Entra ID side. For more information, see [Microsoft Entra service limits and restrictions](https://learn.microsoft.com/en-us/entra/identity/users/directory-service-limits-restrictions). +When assigning a group to a Grafana role, ensure that users are direct members of the group. Users in nested groups will not have access to Grafana due to limitations within Entra ID side. For more information, see [Microsoft Entra service limits and restrictions](https://learn.microsoft.com/en-us/entra/identity/users/directory-service-limits-restrictions). {{< /admonition >}} ### Configure application roles for Grafana in the Azure Portal @@ -226,9 +226,9 @@ If the setting is set to `false`, the user is assigned the role of `Admin` of th Ensure that you have followed the steps in [Create the Microsoft Entra ID application](#create-the-microsoft-entra-id-application) before you begin. -## Configure Azure AD authentication client using the Grafana UI +## Configure Entra ID authentication client using the Grafana UI -As a Grafana Admin, you can configure your Azure AD/Entra ID OAuth client from within Grafana using the Grafana UI. 
To do this, navigate to the **Administration > Authentication > Azure AD** page and fill in the form. If you have a current configuration in the Grafana configuration file, the form will be pre-populated with those values. Otherwise the form will contain default values. +As a Grafana Admin, you can configure your Entra ID OAuth client from within Grafana using the Grafana UI. To do this, navigate to the **Administration > Authentication > Azure AD** page and fill in the form. If you have a current configuration in the Grafana configuration file, the form will be pre-populated with those values. Otherwise the form will contain default values. After you have filled in the form, click **Save** to save the configuration. If the save was successful, Grafana will apply the new configurations. @@ -238,7 +238,7 @@ If you need to reset changes you made in the UI back to the default values, clic If you run Grafana in high availability mode, configuration changes may not get applied to all Grafana instances immediately. You may need to wait a few minutes for the configuration to propagate to all Grafana instances. {{< /admonition >}} -## Configure Azure AD authentication client using the Terraform provider +## Configure Entra ID authentication client using the Terraform provider ```terraform resource "grafana_sso_settings" "azuread_sso_settings" { @@ -270,17 +270,17 @@ resource "grafana_sso_settings" "azuread_sso_settings" { Refer to [Terraform Registry](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/sso_settings) for a complete reference on using the `grafana_sso_settings` resource. -## Configure Azure AD authentication client using the Grafana configuration file +## Configure Entra ID authentication client using the Grafana configuration file Ensure that you have access to the [Grafana configuration file](../../../configure-grafana/#configuration-file-location). 
-### Enable Azure AD OAuth in Grafana +### Enable Entra ID OAuth in Grafana Add the following to the [Grafana configuration file](../../../configure-grafana/#configuration-file-location): ``` [auth.azuread] -name = Azure AD +name = Entra ID enabled = true allow_sign_up = true auto_login = false @@ -321,7 +321,7 @@ When a user logs in using an OAuth provider, Grafana verifies that the access to Grafana uses a refresh token to obtain a new access token without requiring the user to log in again. If a refresh token doesn't exist, Grafana logs the user out of the system after the access token has expired. -Refresh token fetching and access token expiration check is enabled by default for the AzureAD provider since Grafana v10.1.0. If you would like to disable access token expiration check then set the `use_refresh_token` configuration value to `false`. +Refresh token fetching and access token expiration check is enabled by default for the Entra ID provider since Grafana v10.1.0. If you would like to disable access token expiration check then set the `use_refresh_token` configuration value to `false`. {{< admonition type="note" >}} The `accessTokenExpirationCheck` feature toggle has been removed in Grafana v10.3.0 and the `use_refresh_token` configuration value will be used instead for configuring refresh token fetching and access token expiration check. @@ -427,7 +427,7 @@ To learn more, refer to the [Team Sync](https://grafana.com/docs/grafana/::` mappings. Value can be `*` meaning "All users". Role is optional and can have the following values: `None`, `Viewer`, `Editor` or `Admin`. For more information on external organization to role mapping, refer to [Org roles mapping example](#org-roles-mapping-example). | | -| `allow_assign_grafana_admin` | No | No | Set to `true` to automatically sync the Grafana server administrator role. 
When enabled, if the Azure AD/Entra ID user's App role is `GrafanaAdmin`, Grafana grants the user server administrator privileges and the organization administrator role. If disabled, the user will only receive the organization administrator role. For more details on user role mapping, refer to [Map roles](#map-roles). | `false` | +| `allow_assign_grafana_admin` | No | No | Set to `true` to automatically sync the Grafana server administrator role. When enabled, if the Entra ID user's App role is `GrafanaAdmin`, Grafana grants the user server administrator privileges and the organization administrator role. If disabled, the user will only receive the organization administrator role. For more details on user role mapping, refer to [Map roles](#map-roles). | `false` | | `skip_org_role_sync` | No | Yes | Set to `true` to stop automatically syncing user roles. This will allow you to set organization roles for your users from within Grafana manually. | `false` | -| `allowed_groups` | No | Yes | List of comma- or space-separated groups. The user should be a member of at least one group to log in. If you configure `allowed_groups`, you must also configure Azure AD/Entra ID to include the `groups` claim following [Configure group membership claims on the Azure Portal](#configure-group-membership-claims-on-the-azure-portal). | | +| `allowed_groups` | No | Yes | List of comma- or space-separated groups. The user should be a member of at least one group to log in. If you configure `allowed_groups`, you must also configure Entra ID to include the `groups` claim following [Configure group membership claims on the Azure Portal](#configure-group-membership-claims-on-the-azure-portal). | | | `allowed_organizations` | No | Yes | List of comma- or space-separated Azure tenant identifiers. The user should be a member of at least one tenant to log in. | | | `allowed_domains` | No | Yes | List of comma- or space-separated domains. The user should belong to at least one domain to log in. 
| | -| `domain_hint` | No | Yes | The realm of the user in a federated directory. This skips the email-based discovery process that the user goes through on the Azure AD/Entra ID sign-in page, for a slightly more streamlined user experience. More info [here](https://learn.microsoft.com/en-us/entra/identity-platform/v2-protocols-oidc#send-the-sign-in-request). | | +| `domain_hint` | No | Yes | The realm of the user in a federated directory. This skips the email-based discovery process that the user goes through on the Entra ID sign-in page, for a slightly more streamlined user experience. More info [here](https://learn.microsoft.com/en-us/entra/identity-platform/v2-protocols-oidc#send-the-sign-in-request). | | | `tls_skip_verify_insecure` | No | No | If set to `true`, the client accepts any certificate presented by the server and any host name in that certificate. _You should only use this for testing_, because this mode leaves SSL/TLS susceptible to man-in-the-middle attacks. | `false` | | `tls_client_cert` | No | No | The path to the certificate. 
| | diff --git a/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md b/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md index 6c30109a271..0ebe1aee927 100644 --- a/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md +++ b/docs/sources/setup-grafana/configure-security/configure-database-encryption/integrate-with-hashicorp-vault/index.md @@ -6,7 +6,6 @@ description: Learn how to integrate Grafana with Hashicorp Vault so that you can labels: products: - enterprise - - oss title: Integrate Grafana with Hashicorp Vault weight: 500 --- diff --git a/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/configure-scim-with-azuread/_index.md b/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/configure-scim-with-azuread/_index.md index efdf8ae3c56..955a53e3f0d 100644 --- a/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/configure-scim-with-azuread/_index.md +++ b/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/configure-scim-with-azuread/_index.md @@ -139,6 +139,10 @@ Configure the following required attributes: | `objectId` | `externalId` | | `Switch([IsSoftDeleted], , "False", "True", "True", "False")` | `active` | +{{< admonition type="note" >}} +During provisioning, if the identity provider sends user attributes that have no use in Grafana, those attributes will be gracefully ignored. +{{< /admonition >}} + ### Enable provisioning Click **Start provisioning** from the top action bar in the **Overview** page from your Azure AD enterprise application. 
diff --git a/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md b/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md index 8887750950b..23a389d582e 100644 --- a/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md +++ b/docs/sources/setup-grafana/configure-security/configure-scim-provisioning/manage-users-teams/_index.md @@ -77,6 +77,10 @@ SCIM uses a specific process to establish and maintain user identity between the This process ensures secure and consistent user identification across both systems, preventing security issues that could arise from email changes or other user attribute modifications. +{{< admonition type="note" >}} +During provisioning, if the identity provider sends user attributes that have no use in Grafana, those attributes will be gracefully ignored. +{{< /admonition >}} + ### Existing Grafana users For users who already exist in the Grafana instance: @@ -219,7 +223,7 @@ Team provisioning requires `group_sync_enabled = true` in the SCIM configuration {{< /admonition >}} {{< admonition type="warning" >}} -Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider. +Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider. Optionally, you can disable SCIM group sync to allow manual deletion of teams. 
{{< /admonition >}} For detailed configuration steps specific to the identity provider, see: diff --git a/docs/sources/upgrade-guide/when-to-upgrade/index.md b/docs/sources/upgrade-guide/when-to-upgrade/index.md index 1ffaf22fd31..e7a29e531e0 100644 --- a/docs/sources/upgrade-guide/when-to-upgrade/index.md +++ b/docs/sources/upgrade-guide/when-to-upgrade/index.md @@ -46,23 +46,19 @@ We provide release documentation in multiple places to address different needs: ## When to expect releases -Grafana currently follows a monthly release schedule. Below are the planned releases for 2025, though these dates may be subject to change: +Grafana currently follows a monthly release schedule. Below are the planned releases for the end of 2025 and the first part of 2026. However, these dates may be subject to change: | **Release date** | **Grafana versions** | **Release type** | | ---------------- | ------------------------- | ---------------- | -| Jan. 28, 2025 | 11.5 & Supported versions | Minor & patching | -| Feb. 18, 2025 | Supported versions | Patching | -| March 25, 2025 | 11.6 & Supported versions | Minor & patching | -| April 23, 2025 | Supported versions | Patching | -| May 5, 2025 | Grafana 12.0 | Major only | -| May 20, 2025 | Supported versions | Patching | -| June 17, 2025 | Supported versions | Patching | -| July 22, 2025 | 12.1 & Supported versions | Minor & patching | | Aug. 12, 2025 | Supported versions | Patching | | Sept. 23, 2025 | 12.2 & Supported versions | Minor & patching | | Oct. 21, 2025 | Supported versions | Patching | | Nov. 18, 2025 | 12.3 & Supported versions | Minor & patching | | Dec. 16, 2025 | Supported versions | Patching | +| Jan. 13, 2026 | Supported versions | Patching | +| Feb. 24, 2026 | 12.4 & Supported versions | Minor & patching | +| Mar. 
24, 2026 | Supported versions | Patching | +| TBD | Grafana 13 | Major | ### A few important notes @@ -104,20 +100,16 @@ Here is an overview of version support through 2026: | **Version** | **Release date** | **Support end date** | **Support level** | | ------------------------- | ------------------ | -------------------- | ------------------ | -| 10.2.x | October 24, 2023 | July 24, 2024 | Not Supported | -| 10.3.x | January 23, 2024 | October 23, 2024 | Not Supported | -| 10.4.x (Last minor of 10) | March 5, 2024 | June 5, 2025 | Not Supported | -| 11.0.x | May 14, 2024 | February 14, 2025 | Not Supported | -| 11.1.x | June 25, 2024 | April 23, 2025 | Not Supported | -| 11.2.x | August 27, 2024 | May 27, 2025 | Not Supported | | 11.3.x | October 22, 2024 | July 22, 2025 | Not Supported | -| 11.4.x | December 5, 2024 | September 5, 2025 | Patch Support | -| 11.5.x | January 28, 2025 | October 28, 2025 | Patch Support | +| 11.4.x | December 5, 2024 | September 5, 2025 | Not Supported | +| 11.5.x | January 28, 2025 | October 28, 2025 | Not Supported | | 11.6.x (Last minor of 11) | March 25, 2025 | June 25, 2026 | Patch Support | | 12.0.x | May 5, 2025 | February 5, 2026 | Patch Support | | 12.1.x | July 22, 2025 | April 22, 2026 | Patch Support | -| 12.2.x | September 23, 2025 | June 23, 2026 | Yet to be released | +| 12.2.x | September 23, 2025 | June 23, 2026 | Patch Support | | 12.3.x | November 18, 2025 | August 18, 2026 | Yet to be released | +| 12.4.x (Last minor of 12) | February 24, 2026 | November 24, 2026 | Yet to be released | +| 13.0.0 | TBD | TBD | Yet to be released | ## How are these versions supported? 
diff --git a/e2e-playwright/fixtures/long-trace-response.json b/e2e-playwright/fixtures/long-trace-response.json index 5605ab1dd7c..ee23f36e69a 100644 --- a/e2e-playwright/fixtures/long-trace-response.json +++ b/e2e-playwright/fixtures/long-trace-response.json @@ -598,7 +598,7 @@ "auth-validator", "config-loader", "config-writer", - "metrics-collector", + "metrics-collector-last-span", "log-writer", "log-reader", "event-publisher", diff --git a/e2e-playwright/various-suite/trace-view-scrolling.spec.ts b/e2e-playwright/various-suite/trace-view-scrolling.spec.ts index 4cc44f6a86f..bb32d3922ee 100644 --- a/e2e-playwright/various-suite/trace-view-scrolling.spec.ts +++ b/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @@ -2,11 +2,6 @@ import { test, expect } from '@grafana/plugin-e2e'; import longTraceResponse from '../fixtures/long-trace-response.json'; -// this test requires a larger viewport -test.use({ - viewport: { width: 1280, height: 1080 }, -}); - test.describe( 'Trace view', { @@ -33,7 +28,7 @@ test.describe( await datasourceList.getByText('gdev-jaeger').click(); // Check that gdev-jaeger is visible in the query editor - await expect(page.getByText('gdev-jaeger')).toBeVisible(); + await expect(page.getByTestId('query-editor-row').getByText('(gdev-jaeger)')).toBeVisible(); // Type the query const queryField = page @@ -44,14 +39,22 @@ test.describe( // Use Shift+Enter to execute the query await queryField.press('Shift+Enter'); - // Get the initial count of span bars - const initialSpanBars = page.getByTestId(selectors.components.TraceViewer.spanBar); - const initialSpanBarCount = await initialSpanBars.count(); + // Wait for the trace viewer to be ready + await expect(page.getByRole('switch', { name: /api\-gateway GET/ })).toBeVisible(); - await initialSpanBars.last().scrollIntoViewIfNeeded(); - await expect - .poll(async () => await page.getByTestId(selectors.components.TraceViewer.spanBar).count()) - .toBeGreaterThan(initialSpanBarCount); + // Note the 
scrolling element is actually the first child of the scroll view, but we can use the scroll wheel on this anyway + const scrollEl = page.getByTestId(selectors.pages.Explore.General.scrollView); + + // Assert that the last span is not visible in th page - it should be lazily rendered as the user scrolls + const lastSpan = page.getByRole('switch', { name: /metrics\-collector\-last\-span GET/ }); + await expect(lastSpan).not.toBeVisible(); + + // Scroll until the "metrics-collector-last-span GET" switch is visible + await expect(async () => { + await scrollEl.hover(); + await page.mouse.wheel(0, 1000); + await expect(lastSpan).toBeVisible({ timeout: 1 }); + }).toPass(); }); } ); diff --git a/eslint-suppressions.json b/eslint-suppressions.json index d6951a21011..227edc2fd71 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -833,16 +833,6 @@ "count": 13 } }, - "packages/grafana-ui/src/components/Slider/RangeSlider.story.tsx": { - "no-restricted-syntax": { - "count": 1 - } - }, - "packages/grafana-ui/src/components/Slider/Slider.story.tsx": { - "no-restricted-syntax": { - "count": 1 - } - }, "packages/grafana-ui/src/components/Table/Cells/TableCell.tsx": { "@typescript-eslint/consistent-type-assertions": { "count": 3 @@ -861,21 +851,6 @@ "count": 1 } }, - "packages/grafana-ui/src/components/Table/TableNG/Filter/Filter.tsx": { - "@typescript-eslint/no-explicit-any": { - "count": 1 - } - }, - "packages/grafana-ui/src/components/Table/TableNG/Filter/FilterPopup.tsx": { - "@typescript-eslint/no-explicit-any": { - "count": 3 - } - }, - "packages/grafana-ui/src/components/Table/TableNG/Filter/utils.ts": { - "@typescript-eslint/no-explicit-any": { - "count": 1 - } - }, "packages/grafana-ui/src/components/Table/TableNG/TableNG.test.tsx": { "@typescript-eslint/no-explicit-any": { "count": 2 @@ -1359,11 +1334,6 @@ "count": 1 } }, - "public/app/features/admin/AdminFeatureTogglesTable.tsx": { - "no-restricted-syntax": { - "count": 3 - } - }, 
"public/app/features/admin/ServerStatsCard.tsx": { "no-restricted-syntax": { "count": 1 @@ -1658,11 +1628,6 @@ "count": 8 } }, - "public/app/features/alerting/unified/components/rule-editor/alert-rule-form/simplifiedRouting/contactPoint/ContactPointSelector.tsx": { - "no-restricted-syntax": { - "count": 1 - } - }, "public/app/features/alerting/unified/components/rule-editor/alert-rule-form/simplifiedRouting/route-settings/ActiveTimingFields.tsx": { "no-restricted-syntax": { "count": 1 @@ -1693,11 +1658,6 @@ "count": 1 } }, - "public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/QueryAndExpressionsStep.tsx": { - "no-restricted-syntax": { - "count": 2 - } - }, "public/app/features/alerting/unified/components/rule-editor/rule-types/RuleType.tsx": { "no-restricted-syntax": { "count": 1 @@ -4484,6 +4444,11 @@ "count": 1 } }, + "public/app/plugins/panel/geomap/components/DebugOverlay.tsx": { + "@grafana/no-aria-label-selectors": { + "count": 1 + } + }, "public/app/plugins/panel/geomap/components/MarkersLegend.tsx": { "@typescript-eslint/consistent-type-assertions": { "count": 2 diff --git a/eslint.config.js b/eslint.config.js index 4aa4e15a0f9..a0245a55b48 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -361,7 +361,10 @@ module.exports = [ '**/mock*.{ts,tsx}', ], rules: { - '@grafana/i18n/no-untranslated-strings': ['error', { calleesToIgnore: ['^css$', 'use[A-Z].*'] }], + '@grafana/i18n/no-untranslated-strings': [ + 'error', + { calleesToIgnore: ['^css$', 'use[A-Z].*'], basePaths: ['public/app/features'] }, + ], '@grafana/i18n/no-translation-top-level': 'error', }, }, diff --git a/go.mod b/go.mod index bfac54f262a..285915060d0 100644 --- a/go.mod +++ b/go.mod @@ -86,9 +86,9 @@ require ( github.com/googleapis/gax-go/v2 v2.14.2 // @grafana/grafana-backend-group github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad - 
github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d // @grafana/alerting-backend - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // @grafana/identity-access-team - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // @grafana/identity-access-team + github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 // @grafana/alerting-backend + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics github.com/grafana/dataplane/sdata v0.0.9 // @grafana/observability-metrics github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // @grafana/grafana-backend-group @@ -211,7 +211,7 @@ require ( gonum.org/v1/gonum v0.16.0 // @grafana/oss-big-tent google.golang.org/api v0.235.0 // @grafana/grafana-backend-group google.golang.org/grpc v1.75.1 // @grafana/plugins-platform-backend - google.golang.org/protobuf v1.36.8 // @grafana/plugins-platform-backend + google.golang.org/protobuf v1.36.9 // @grafana/plugins-platform-backend gopkg.in/ini.v1 v1.67.0 // @grafana/alerting-backend gopkg.in/mail.v2 v2.3.1 // @grafana/grafana-backend-group gopkg.in/yaml.v2 v2.4.0 // @grafana/alerting-backend diff --git a/go.sum b/go.sum index b6b3737470e..63b483e722d 100644 --- a/go.sum +++ b/go.sum @@ -1585,12 +1585,12 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d h1:zzEty7HgfXbQ/RiBCJFMqaZiJlqiXuz/Zbc6/H6ksuM= 
-github.com/grafana/alerting v0.0.0-20250925200825-7a889aa4934d/go.mod h1:T5sitas9VhVj8/S9LeRLy6H75kTBdh/sCCqHo7gaQI8= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 h1:wfehM99Xlpltl9MQx8SITkgFgHmPGqrXoBCVLk/Q6NA= +github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA= github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg= github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= @@ -3534,8 +3534,8 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -google.golang.org/protobuf v1.36.8 
h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/go.work.sum b/go.work.sum index d9e85e8bc52..faceac46898 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1047,15 +1047,19 @@ github.com/grafana/alerting v0.0.0-20250925193206-bd061d3d9185 h1:R494uXJOz7glN7 github.com/grafana/alerting v0.0.0-20250925193206-bd061d3d9185/go.mod h1:T5sitas9VhVj8/S9LeRLy6H75kTBdh/sCCqHo7gaQI8= github.com/grafana/authlib v0.0.0-20250123104008-e99947858901/go.mod h1:/gYfphsNu9v1qYWXxpv1NSvMEMSwvdf8qb8YlgwIRl8= github.com/grafana/authlib v0.0.0-20250909101823-1b466dbd19a1/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= +github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= github.com/grafana/authlib/types v0.0.0-20250120144156-d6737a7dc8f5/go.mod h1:qYjSd1tmJiuVoSICp7Py9/zD54O9uQQA3wuM6Gg4DFM= github.com/grafana/authlib/types v0.0.0-20250120145936-5f0e28e7a87c/go.mod h1:qYjSd1tmJiuVoSICp7Py9/zD54O9uQQA3wuM6Gg4DFM= github.com/grafana/authlib/types v0.0.0-20250314102521-a77865c746c0/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/authlib/types v0.0.0-20250721184729-1593a38e4933/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/cloudflare-go 
v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= github.com/grafana/cog v0.0.37/go.mod h1:UDstzYqMdgIROmbfkHL8fB9XWQO2lnf5z+4W/eJo4Dc= github.com/grafana/cog v0.0.38 h1:V7gRRn/mh7Bg1ptrCxo0bv6K0SnG9TiDZk+3Ppftn6s= github.com/grafana/cog v0.0.38/go.mod h1:UDstzYqMdgIROmbfkHL8fB9XWQO2lnf5z+4W/eJo4Dc= +github.com/grafana/cog v0.0.40/go.mod h1:TDunc7TYF7EfzjwFOlC5AkMe3To/U2KqyyG3QVvrF38= +github.com/grafana/cog v0.0.41/go.mod h1:TDunc7TYF7EfzjwFOlC5AkMe3To/U2KqyyG3QVvrF38= github.com/grafana/dskit v0.0.0-20250818234656-8ff9c6532e85/go.mod h1:kImsvJ1xnmeT9Z6StK+RdEKLzlpzBsKwJbEQfmBJdFs= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= @@ -2164,6 +2168,7 @@ google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojt google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= diff --git a/packages/grafana-alerting/src/grafana/api/util.test.ts b/packages/grafana-alerting/src/grafana/api/util.test.ts index 215cba04187..f8278bbdc72 100644 --- a/packages/grafana-alerting/src/grafana/api/util.test.ts +++ b/packages/grafana-alerting/src/grafana/api/util.test.ts @@ -1,6 +1,6 @@ import { config } from '@grafana/runtime'; -import { getAPIBaseURL, 
getAPINamespace, getAPIReducerPath } from './util'; +import { base64UrlEncode, getAPIBaseURL, getAPINamespace, getAPIReducerPath } from './util'; describe('API utilities', () => { const originalAppSubUrl = config.appSubUrl; @@ -64,4 +64,72 @@ describe('API utilities', () => { expect(result).toBe('notifications.alerting.grafana.app/v0alpha1'); }); }); + + describe('base64UrlEncode', () => { + it('should encode simple ASCII strings', () => { + expect(base64UrlEncode('hello')).toBe('aGVsbG8'); + }); + + it('should encode strings with special characters', () => { + expect(base64UrlEncode('hello world!')).toBe('aGVsbG8gd29ybGQh'); + }); + + it('should handle emoji characters correctly', () => { + // Single emoji + expect(base64UrlEncode('⛳')).toBe('4puz'); + // Multi-byte emoji + expect(base64UrlEncode('🧀')).toBe('8J-ngA'); + // Emoji with variant selector + expect(base64UrlEncode('❤️')).toBe('4p2k77iP'); + }); + + it('should handle mixed ASCII and Unicode characters', () => { + const input = 'hello⛳❤️🧀'; + const encoded = base64UrlEncode(input); + expect(encoded).toBe('aGVsbG_im7PinaTvuI_wn6eA'); + }); + + it('should convert to base64url format (no padding)', () => { + // Standard base64 would have padding with '=' + const result = base64UrlEncode('test'); + expect(result).not.toContain('='); + }); + + it('should replace + with - and / with _', () => { + // String that produces both + and / in standard base64 + const input = 'a??b'; // produces 'YT8/Yg==' in base64, which has / + const input2 = 'a?>b'; // produces 'YT8+Yg==' in base64, which has + + const encoded = base64UrlEncode(input); + const encoded2 = base64UrlEncode(input2); + expect(encoded).not.toContain('+'); + expect(encoded).not.toContain('/'); + expect(encoded2).not.toContain('+'); + expect(encoded2).not.toContain('/'); + expect(encoded).toContain('_'); // Should have _ instead of / + expect(encoded2).toContain('-'); // Should have - instead of + + }); + + it('should handle empty strings', () => { + 
expect(base64UrlEncode('')).toBe(''); + }); + + it('should handle contact point names with special characters', () => { + expect(base64UrlEncode('my-contact-point')).toBe('bXktY29udGFjdC1wb2ludA'); + expect(base64UrlEncode('Contact Point 🔔')).toBe('Q29udGFjdCBQb2ludCDwn5SU'); + }); + + it('should throw error for malformed UTF-16 strings with lone surrogates', () => { + // String with lone high surrogate + const malformedString = 'hello\uDE75'; + expect(() => base64UrlEncode(malformedString)).toThrow( + 'Cannot encode malformed UTF-16 string with lone surrogates' + ); + }); + + it('should handle well-formed strings with proper surrogate pairs', () => { + // Proper surrogate pair for emoji (U+1F9C0) + const wellFormedString = 'hello\uD83E\uDDC0'; + expect(() => base64UrlEncode(wellFormedString)).not.toThrow(); + }); + }); }); diff --git a/packages/grafana-alerting/src/grafana/api/util.ts b/packages/grafana-alerting/src/grafana/api/util.ts index a562e151366..cc7787716a4 100644 --- a/packages/grafana-alerting/src/grafana/api/util.ts +++ b/packages/grafana-alerting/src/grafana/api/util.ts @@ -13,3 +13,39 @@ export const getAPIBaseURL = (group: string, version: string) => { // By including the version in the reducer path we can prevent cache bugs when different versions of the API are used for the same entities export const getAPIReducerPath = (group: string, version: string) => `${group}/${version}` as const; + +/** + * Check if a string is well-formed UTF-16 (no lone surrogates). + * encodeURIComponent() throws an error for lone surrogates + */ +export const isWellFormed = (str: string): boolean => { + try { + encodeURIComponent(str); + return true; + } catch (error) { + return false; + } +}; + +/** + * Base64URL encode a string using native browser APIs. + * Handles Unicode characters correctly by using TextEncoder. + * Converts standard base64 to base64url by replacing + with -, / with _, and removing padding. 
+ * @throws Error if the input string contains lone surrogates (malformed UTF-16) + */ +export const base64UrlEncode = (value: string): string => { + // Check if the string is well-formed UTF-16 + if (!isWellFormed(value)) { + throw new Error(`Cannot encode malformed UTF-16 string with lone surrogates: ${value}`); + } + + // Encode UTF-8 string to bytes + const bytes = new TextEncoder().encode(value); + + // Convert bytes to base64 + const binString = String.fromCodePoint(...bytes); + const base64 = btoa(binString); + + // Convert to base64url format + return base64.replace(/\+/g, '-').replace(/\//g, '_').replace(/=/g, ''); +}; diff --git a/packages/grafana-alerting/src/index.ts b/packages/grafana-alerting/src/index.ts index 288d2c776fd..e7e8f4b22d2 100644 --- a/packages/grafana-alerting/src/index.ts +++ b/packages/grafana-alerting/src/index.ts @@ -11,5 +11,8 @@ export { AlertLabels } from './grafana/rules/components/labels/AlertLabels'; export { AlertLabel } from './grafana/rules/components/labels/AlertLabel'; // keep label utils internal to the app for now +// Utilities +export { base64UrlEncode } from './grafana/api/util'; + // This is a dummy export so typescript doesn't error importing an "empty module" export const index = {}; diff --git a/packages/grafana-data/src/text/sanitize.test.ts b/packages/grafana-data/src/text/sanitize.test.ts index a0c69838976..f6a43d4a943 100644 --- a/packages/grafana-data/src/text/sanitize.test.ts +++ b/packages/grafana-data/src/text/sanitize.test.ts @@ -167,9 +167,9 @@ describe('validatePath', () => { expect(validatePath(urlWithDots)).toBe(urlWithDots); }); - it('should allow query parameters that contain dots', () => { - const urlWithDotsInQuery = 'https://api.example.com/search?version=1.2.3&file=../config'; - expect(validatePath(urlWithDotsInQuery)).toBe(urlWithDotsInQuery); + it('should block query parameters that contain path traversal', () => { + const urlWithTraversalInQuery = 
'https://api.example.com/search?version=1.2.3&file=../config'; + expect(() => validatePath(urlWithTraversalInQuery)).toThrow(PathValidationError); }); it('should handle malformed URLs gracefully', () => { diff --git a/packages/grafana-data/src/text/sanitize.ts b/packages/grafana-data/src/text/sanitize.ts index 2dae143ff69..121466ab81a 100644 --- a/packages/grafana-data/src/text/sanitize.ts +++ b/packages/grafana-data/src/text/sanitize.ts @@ -146,27 +146,25 @@ export class PathValidationError extends Error { */ export function validatePath(path: OriginalPath): OriginalPath { try { - let originalDecoded: string = path; // down-cast to a string to indicate this can't be returned + let decoded: string = path; while (true) { - const nextDecode = decodeURIComponent(originalDecoded); - if (nextDecode === originalDecoded) { + const nextDecode = decodeURIComponent(decoded); + if (nextDecode === decoded) { break; // String is fully decoded. } - originalDecoded = nextDecode; + decoded = nextDecode; } - // Remove query params and fragments to check only the path portion - const cleaned = originalDecoded.split(/[\?#]/)[0]; - originalDecoded = cleaned; - - // If the original string contains traversal attempts, block it - if (/\.\.|\/\\|[\t\n\r]/.test(originalDecoded)) { + // Validate the entire decoded string for traversal attempts + // This prevents attacks that use query separators to hide traversal payloads + if (/\.\.|\/\\|[\t\n\r]/.test(decoded)) { throw new PathValidationError(); } + // Return the original path (not the decoded version) to preserve the full URL return path; } catch (err) { - // Rethrow the original InvalidPathError to preserve the stack trace + // Rethrow the original PathValidationError to preserve the stack trace if (err instanceof PathValidationError) { throw err; } diff --git a/packages/grafana-data/src/types/featureToggles.gen.ts b/packages/grafana-data/src/types/featureToggles.gen.ts index 06546e18d9d..546b77a1af5 100644 --- 
a/packages/grafana-data/src/types/featureToggles.gen.ts +++ b/packages/grafana-data/src/types/featureToggles.gen.ts @@ -115,6 +115,10 @@ export interface FeatureToggles { */ starsFromAPIServer?: boolean; /** + * Routes stars requests from /api to the /apis endpoint + */ + kubernetesStars?: boolean; + /** * Enable streaming JSON parser for InfluxDB datasource InfluxQL query language */ influxqlStreamingParser?: boolean; @@ -205,10 +209,6 @@ export interface FeatureToggles { */ grafanaAPIServerEnsureKubectlAccess?: boolean; /** - * Enable admin page for managing feature toggles from the Grafana front-end. Grafana Cloud only. - */ - featureToggleAdminPage?: boolean; - /** * Enable caching for async queries for Redshift and Athena. Requires that the datasource has caching and async query support enabled * @default true */ @@ -464,7 +464,7 @@ export interface FeatureToggles { */ alertingSaveStatePeriodic?: boolean; /** - * Enables the compressed protobuf-based alert state storage + * Enables the compressed protobuf-based alert state storage. Default is enabled. * @default true */ alertingSaveStateCompressed?: boolean; @@ -479,6 +479,11 @@ export interface FeatureToggles { */ useScopeSingleNodeEndpoint?: boolean; /** + * Makes the frontend use the 'names' param for fetching multiple scope nodes at once + * @default false + */ + useMultipleScopeNodesEndpoint?: boolean; + /** * In-development feature that will allow injection of labels into prometheus queries. 
* @default true */ @@ -880,6 +885,11 @@ export interface FeatureToggles { */ alertingJiraIntegration?: boolean; /** + * + * @default true + */ + alertingUseNewSimplifiedRoutingHashAlgorithm?: boolean; + /** * Use the scopes navigation endpoint instead of the dashboardbindings endpoint */ useScopesNavigationEndpoint?: boolean; @@ -960,10 +970,6 @@ export interface FeatureToggles { */ multiTenantTempCredentials?: boolean; /** - * Enables localization for plugins - */ - localizationForPlugins?: boolean; - /** * Enables unified navbars * @default false */ @@ -1192,4 +1198,9 @@ export interface FeatureToggles { * @default false */ cdnPluginsLoadFirst?: boolean; + /** + * Enable loading plugins via declarative URLs + * @default false + */ + cdnPluginsUrls?: boolean; } diff --git a/packages/grafana-eslint-rules/rules/no-aria-label-e2e-selectors.cjs b/packages/grafana-eslint-rules/rules/no-aria-label-e2e-selectors.cjs index 27ad2f2a4f2..45fae240f07 100644 --- a/packages/grafana-eslint-rules/rules/no-aria-label-e2e-selectors.cjs +++ b/packages/grafana-eslint-rules/rules/no-aria-label-e2e-selectors.cjs @@ -50,7 +50,7 @@ const rule = createRule({ (v) => v.type === 'ImportBinding' && v.parent.type === 'ImportDeclaration' && - v.parent.source.value === GRAFANA_E2E_PACKAGE_NAME + v.parent.source.value.startsWith(GRAFANA_E2E_PACKAGE_NAME) ); if (importDef) { diff --git a/packages/grafana-eslint-rules/tests/no-aria-label-e2e-selectors.test.js b/packages/grafana-eslint-rules/tests/no-aria-label-e2e-selectors.test.js index 60ef0a89644..3964cc8921e 100644 --- a/packages/grafana-eslint-rules/tests/no-aria-label-e2e-selectors.test.js +++ b/packages/grafana-eslint-rules/tests/no-aria-label-e2e-selectors.test.js @@ -19,12 +19,15 @@ const ruleTester = new RuleTester(); ruleTester.run('eslint no-aria-label-e2e-selector', noAriaLabelE2ESelector, { valid: [ { + name: 'direct aria-label usage', code: `
`, }, { + name: 'basic jsx expression container aria-label usage', code: `
`, }, { + name: 'imported from something else', code: ` import { someOtherImport } from './some-other-location'; @@ -34,9 +37,23 @@ import { someOtherImport } from './some-other-location'; ], invalid: [ { + name: 'imported from e2e-selectors package', code: ` import { selectors } from '@grafana/e2e-selectors'; +
+ `, + errors: [ + { + message: 'Use data-testid for E2E selectors instead of aria-label', + }, + ], + }, + { + name: 'imported from elsewhere in e2e-selectors package', + code: ` +import { selectors } from '@grafana/e2e-selectors/src'; +
`, errors: [ diff --git a/packages/grafana-i18n/src/eslint/README.md b/packages/grafana-i18n/src/eslint/README.md index 4fb1abdf8f6..12b3a559f9b 100644 --- a/packages/grafana-i18n/src/eslint/README.md +++ b/packages/grafana-i18n/src/eslint/README.md @@ -10,6 +10,30 @@ Check if strings are marked for translation inside JSX Elements, in certain JSX ### Options +#### `basePaths` + +Allows specifying base paths that should be stripped when generating i18n keys. Defaults to `['src']`. + +#### Example + +```tsx +// For a file located at public/app/features/search/EmptyState.tsx + +// Specifying basePaths: +// { +// '@grafana/i18n/no-untranslated-strings': ['error', { basePaths: ['public/app/features'] }], +// } + +No results found + +// Without basePaths: +// { +// '@grafana/i18n/no-untranslated-strings': ['error'], +// } + +No results found +``` + #### `forceFix` Allows specifying directories that, if the file is present within, then the rule will automatically fix the errors. This is primarily a workaround to allow for automatic mark up of new violations as the rule evolves. 
diff --git a/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.cjs b/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.cjs index 7d741720bc2..2b058426d53 100644 --- a/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.cjs +++ b/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.cjs @@ -2,8 +2,8 @@ /** @typedef {import('@typescript-eslint/utils').TSESTree.Node} Node */ /** @typedef {import('@typescript-eslint/utils').TSESTree.JSXElement} JSXElement */ /** @typedef {import('@typescript-eslint/utils').TSESTree.JSXFragment} JSXFragment */ -/** @typedef {import('@typescript-eslint/utils').TSESLint.RuleModule<'noUntranslatedStrings' | 'noUntranslatedStringsProp' | 'wrapWithTrans' | 'wrapWithT' | 'noUntranslatedStringsProperties', [{ forceFix: string[] , calleesToIgnore: string[] }]>} RuleDefinition */ -/** @typedef {import('@typescript-eslint/utils/ts-eslint').RuleContext<'noUntranslatedStrings' | 'noUntranslatedStringsProp' | 'wrapWithTrans' | 'wrapWithT' | 'noUntranslatedStringsProperties', [{forceFix: string[], calleesToIgnore: string[]}]>} RuleContextWithOptions */ +/** @typedef {import('@typescript-eslint/utils').TSESLint.RuleModule<'noUntranslatedStrings' | 'noUntranslatedStringsProp' | 'wrapWithTrans' | 'wrapWithT' | 'noUntranslatedStringsProperties', [{ forceFix: string[] , calleesToIgnore: string[], basePaths: string[] }]>} RuleDefinition */ +/** @typedef {import('@typescript-eslint/utils/ts-eslint').RuleContext<'noUntranslatedStrings' | 'noUntranslatedStringsProp' | 'wrapWithTrans' | 'wrapWithT' | 'noUntranslatedStringsProperties', [{forceFix: string[], calleesToIgnore: string[], basePaths: string[]}]>} RuleContextWithOptions */ const { getNodeValue, @@ -301,12 +301,19 @@ const noUntranslatedStrings = createRule({ }, default: [], }, + basePaths: { + type: 'array', + items: { + type: 'string', + }, + default: ['src'], + }, }, 
additionalProperties: false, }, ], }, - defaultOptions: [{ forceFix: [], calleesToIgnore: [] }], + defaultOptions: [{ forceFix: [], calleesToIgnore: [], basePaths: ['src'] }], }); module.exports = noUntranslatedStrings; diff --git a/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.test.js b/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.test.js index 9913ab10f73..684823ae152 100644 --- a/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.test.js +++ b/packages/grafana-i18n/src/eslint/no-untranslated-strings/no-untranslated-strings.test.js @@ -2,7 +2,7 @@ import { RuleTester } from 'eslint'; import noUntranslatedStrings from './no-untranslated-strings.cjs'; -const filename = 'public/app/features/some-feature/nested/SomeFile.tsx'; +const filename = 'src/some-feature/nested/SomeFile.tsx'; const packageName = '@grafana/i18n'; @@ -797,7 +797,7 @@ const Foo = () => { name: 'Auto fixes when options are configured', code: `const Foo = () =>
test
`, filename, - options: [{ forceFix: ['public/app/features/some-feature'] }], + options: [{ forceFix: ['src/some-feature'] }], output: `${TRANS_IMPORT} const Foo = () =>
test
`, errors: [ @@ -821,7 +821,7 @@ const Foo = () => { return
}`, filename, - options: [{ forceFix: ['public/app/features/some-feature'] }], + options: [{ forceFix: ['src/some-feature'] }], output: ` ${T_IMPORT} const Foo = () => { @@ -853,7 +853,94 @@ const Foo = () => { } }`, filename, - options: [{ forceFix: ['public/app/features/some-feature'] }], + options: [{ forceFix: ['src/some-feature'] }], + output: ` +${T_IMPORT} +const Foo = () => { + return { + label: t("some-feature.foo.label.test", "test"), + } +}`, + errors: [ + { + messageId: 'noUntranslatedStringsProperties', + suggestions: [ + { + messageId: 'wrapWithT', + output: ` +${T_IMPORT} +const Foo = () => { + return { + label: t("some-feature.foo.label.test", "test"), + } +}`, + }, + ], + }, + ], + }, + + { + name: 'Auto fixes when options are configured for a different basePath', + code: `const Foo = () =>
test
`, + filename: 'public/app/features/some-feature/nested/SomeFile.tsx', + options: [{ forceFix: ['public/app/features/some-feature'], basePaths: ['public/app/features'] }], + output: `${TRANS_IMPORT} +const Foo = () =>
test
`, + errors: [ + { + messageId: 'noUntranslatedStrings', + suggestions: [ + { + messageId: 'wrapWithTrans', + output: `${TRANS_IMPORT} +const Foo = () =>
test
`, + }, + ], + }, + ], + }, + + { + name: 'Auto fixes when options are configured for a different basePath - prop', + code: ` +const Foo = () => { + return
+}`, + filename: 'public/app/features/some-feature/nested/SomeFile.tsx', + options: [{ forceFix: ['public/app/features/some-feature'], basePaths: ['public/app/features'] }], + output: ` +${T_IMPORT} +const Foo = () => { + return
+}`, + errors: [ + { + messageId: 'noUntranslatedStringsProp', + suggestions: [ + { + messageId: 'wrapWithT', + output: ` +${T_IMPORT} +const Foo = () => { + return
+}`, + }, + ], + }, + ], + }, + + { + name: 'Auto fixes object property for a different basePath', + code: ` +const Foo = () => { + return { + label: 'test', + } +}`, + filename: 'public/app/features/some-feature/nested/SomeFile.tsx', + options: [{ forceFix: ['public/app/features/some-feature'], basePaths: ['public/app/features'] }], output: ` ${T_IMPORT} const Foo = () => { diff --git a/packages/grafana-i18n/src/eslint/no-untranslated-strings/translation-utils.cjs b/packages/grafana-i18n/src/eslint/no-untranslated-strings/translation-utils.cjs index 070e0c5ee53..8d7d4456ace 100644 --- a/packages/grafana-i18n/src/eslint/no-untranslated-strings/translation-utils.cjs +++ b/packages/grafana-i18n/src/eslint/no-untranslated-strings/translation-utils.cjs @@ -7,7 +7,7 @@ /** @typedef {import('@typescript-eslint/utils').TSESTree.JSXChild} JSXChild */ /** @typedef {import('@typescript-eslint/utils').TSESTree.Property} Property */ /** @typedef {import('@typescript-eslint/utils/ts-eslint').RuleFixer} RuleFixer */ -/** @typedef {import('@typescript-eslint/utils/ts-eslint').RuleContext<'noUntranslatedStrings' | 'noUntranslatedStringsProp' | 'wrapWithTrans' | 'wrapWithT', [{forceFix: string[]}]>} RuleContextWithOptions */ +/** @typedef {import('@typescript-eslint/utils/ts-eslint').RuleContext<'noUntranslatedStrings' | 'noUntranslatedStringsProp' | 'wrapWithTrans' | 'wrapWithT', [{forceFix: string[], calleesToIgnore: string[], basePaths: string[]}]>} RuleContextWithOptions */ const { AST_NODE_TYPES } = require('@typescript-eslint/utils'); /** @@ -150,9 +150,12 @@ function getTDeclaration(node, context) { */ function getTranslationPrefix(context) { const filename = context.filename; - const match = filename.match(/public\/app\/features\/(.+?)\//); - if (match) { - return match[1]; + const basePaths = context.options[0]?.basePaths ?? 
['src']; + for (const path of basePaths) { + const match = filename.match(new RegExp(`${path}/(.+?)/`)); + if (match) { + return match[1]; + } } return null; } diff --git a/packages/grafana-prometheus/src/datasource.test.ts b/packages/grafana-prometheus/src/datasource.test.ts index 73964477bf3..bc30a4befff 100644 --- a/packages/grafana-prometheus/src/datasource.test.ts +++ b/packages/grafana-prometheus/src/datasource.test.ts @@ -1308,6 +1308,13 @@ describe('PrometheusDatasource incremental query logic', () => { expect(mockCache.requestInfo).not.toHaveBeenCalled(); }); + it('should disable incremental query when public dashboards are being used', async () => { + config.publicDashboardAccessToken = 'token'; + const request = createDataRequest([{ expr: 'rate(up[5m])', refId: 'A' }]); + await lastValueFrom(ds.query(request)); + expect(mockCache.requestInfo).not.toHaveBeenCalled(); + }); + it('should disable incremental query when any target contains $__range', async () => { const request = createDataRequest([ { expr: 'up', refId: 'A' }, diff --git a/packages/grafana-prometheus/src/datasource.ts b/packages/grafana-prometheus/src/datasource.ts index 5c79ed398ba..9755325ff78 100644 --- a/packages/grafana-prometheus/src/datasource.ts +++ b/packages/grafana-prometheus/src/datasource.ts @@ -471,7 +471,9 @@ export class PrometheusDatasource // Use incremental query only if enabled and no instant queries or no $__range variables const shouldUseIncrementalQuery = - this.hasIncrementalQuery && !request.targets.some((target) => target.instant || target.expr.includes('$__range')); + this.hasIncrementalQuery && + !config.publicDashboardAccessToken && + !request.targets.some((target) => target.instant || target.expr?.includes('$__range')); let fullOrPartialRequest: DataQueryRequest = request; let requestInfo: CacheRequestInfo | undefined = undefined; diff --git a/packages/grafana-ui/src/components/Dropdown/ButtonSelect.tsx 
b/packages/grafana-ui/src/components/Dropdown/ButtonSelect.tsx index 12e3eb08354..a2e23e78575 100644 --- a/packages/grafana-ui/src/components/Dropdown/ButtonSelect.tsx +++ b/packages/grafana-ui/src/components/Dropdown/ButtonSelect.tsx @@ -20,13 +20,14 @@ export interface Props extends HTMLAttributes { narrow?: boolean; variant?: ToolbarButtonVariant; tooltip?: string; + root?: HTMLElement; } /** * @deprecated Use Combobox or Dropdown instead */ const ButtonSelectComponent = (props: Props) => { - const { className, options, value, onChange, narrow, variant, ...restProps } = props; + const { className, options, value, onChange, narrow, variant, root, ...restProps } = props; const [isOpen, setIsOpen] = useState(false); const renderMenu = () => ( @@ -50,7 +51,7 @@ const ButtonSelectComponent = (props: Props) => { ); return ( - + {value?.label || (value?.value != null ? String(value?.value) : null)} diff --git a/packages/grafana-ui/src/components/Dropdown/Dropdown.tsx b/packages/grafana-ui/src/components/Dropdown/Dropdown.tsx index eff7cdc9454..77cdcc92519 100644 --- a/packages/grafana-ui/src/components/Dropdown/Dropdown.tsx +++ b/packages/grafana-ui/src/components/Dropdown/Dropdown.tsx @@ -25,12 +25,13 @@ export interface Props { overlay: React.ReactElement | (() => React.ReactElement); placement?: TooltipPlacement; children: React.ReactElement; + root?: HTMLElement; /** Amount in pixels to nudge the dropdown vertically and horizontally, respectively. 
*/ offset?: [number, number]; onVisibleChange?: (state: boolean) => void; } -export const Dropdown = React.memo(({ children, overlay, placement, offset, onVisibleChange }: Props) => { +export const Dropdown = React.memo(({ children, overlay, placement, offset, root, onVisibleChange }: Props) => { const [show, setShow] = useState(false); const transitionRef = useRef(null); const floatingUIPlacement = getPlacement(placement); @@ -84,7 +85,7 @@ export const Dropdown = React.memo(({ children, overlay, placement, offset, onVi ...getReferenceProps(), })} {show && ( - + {/* this is handling bubbled events from the inner overlay diff --git a/packages/grafana-ui/src/components/Slider/RangeSlider.story.tsx b/packages/grafana-ui/src/components/Slider/RangeSlider.story.tsx index d00648bdfbe..eccb916483a 100644 --- a/packages/grafana-ui/src/components/Slider/RangeSlider.story.tsx +++ b/packages/grafana-ui/src/components/Slider/RangeSlider.story.tsx @@ -9,8 +9,6 @@ const meta: Meta = { controls: { exclude: ['tooltipAlwaysVisible'], }, - // TODO fix a11y issue in story and remove this - a11y: { test: 'off' }, }, argTypes: { orientation: { control: { type: 'select', options: ['horizontal', 'vertical'] } }, diff --git a/packages/grafana-ui/src/components/Slider/RangeSlider.tsx b/packages/grafana-ui/src/components/Slider/RangeSlider.tsx index 34b9d403085..794ea5b795f 100644 --- a/packages/grafana-ui/src/components/Slider/RangeSlider.tsx +++ b/packages/grafana-ui/src/components/Slider/RangeSlider.tsx @@ -3,6 +3,8 @@ import { Global } from '@emotion/react'; import Slider, { SliderProps } from 'rc-slider'; import { useCallback } from 'react'; +import { t } from '@grafana/i18n'; + import { useStyles2 } from '../../themes/ThemeContext'; import HandleTooltip from './HandleTooltip'; @@ -44,6 +46,7 @@ export const RangeSlider = ({ const isHorizontal = orientation === 'horizontal'; const styles = useStyles2(getStyles, isHorizontal); + const dragHandleAriaLabel = 
t('grafana-ui.range-slider.drag-handle-aria-label', 'Use arrow keys to change the value'); const tipHandleRender: SliderProps['handleRender'] = (node, handleProps) => { return ( @@ -73,6 +76,7 @@ export const RangeSlider = ({ vertical={!isHorizontal} reverse={reverse} handleRender={tipHandleRender} + ariaLabelForHandle={dragHandleAriaLabel} />
); diff --git a/packages/grafana-ui/src/components/Slider/Slider.story.tsx b/packages/grafana-ui/src/components/Slider/Slider.story.tsx index 770038dbc64..207a33ec5fe 100644 --- a/packages/grafana-ui/src/components/Slider/Slider.story.tsx +++ b/packages/grafana-ui/src/components/Slider/Slider.story.tsx @@ -1,4 +1,7 @@ import { StoryFn, Meta } from '@storybook/react'; +import { useId } from 'react'; + +import { Field } from '../Forms/Field'; import { Slider } from './Slider'; @@ -12,8 +15,6 @@ const meta: Meta = { knobs: { disabled: true, }, - // TODO fix a11y issue in story and remove this - a11y: { test: 'off' }, }, argTypes: { orientation: { control: { type: 'select', options: ['horizontal', 'vertical'] } }, @@ -31,17 +32,25 @@ const meta: Meta = { }; export const Basic: StoryFn = (args) => { + const id = useId(); + return (
- + + +
); }; export const WithMarks: StoryFn = (args) => { + const id = useId(); + return (
- + + +
); }; diff --git a/packages/grafana-ui/src/components/Slider/Slider.test.tsx b/packages/grafana-ui/src/components/Slider/Slider.test.tsx index 2723a180b3c..e24ccd7e751 100644 --- a/packages/grafana-ui/src/components/Slider/Slider.test.tsx +++ b/packages/grafana-ui/src/components/Slider/Slider.test.tsx @@ -7,6 +7,7 @@ import { SliderProps } from './types'; const sliderProps: SliderProps = { min: 10, max: 20, + inputId: 'slider-test', }; describe('Slider', () => { diff --git a/packages/grafana-ui/src/components/Slider/Slider.tsx b/packages/grafana-ui/src/components/Slider/Slider.tsx index c677d402dc9..e8c377070bf 100644 --- a/packages/grafana-ui/src/components/Slider/Slider.tsx +++ b/packages/grafana-ui/src/components/Slider/Slider.tsx @@ -3,6 +3,8 @@ import { Global } from '@emotion/react'; import SliderComponent from 'rc-slider'; import { useState, useCallback, ChangeEvent, FocusEvent } from 'react'; +import { t } from '@grafana/i18n'; + import { useStyles2 } from '../../themes/ThemeContext'; import { Input } from '../Input/Input'; @@ -24,11 +26,14 @@ export const Slider = ({ ariaLabelForHandle, marks, included, + inputId, }: SliderProps) => { const isHorizontal = orientation === 'horizontal'; const styles = useStyles2(getStyles, isHorizontal, Boolean(marks)); const SliderWithTooltip = SliderComponent; const [sliderValue, setSliderValue] = useState(value ?? min); + const dragHandleAriaLabel = + ariaLabelForHandle ?? t('grafana-ui.slider.drag-handle-aria-label', 'Use arrow keys to change the value'); const onSliderChange = useCallback( (v: number | number[]) => { @@ -102,7 +107,7 @@ export const Slider = ({ onChangeComplete={handleChangeComplete} vertical={!isHorizontal} reverse={reverse} - ariaLabelForHandle={ariaLabelForHandle} + ariaLabelForHandle={dragHandleAriaLabel} marks={marks} included={included} /> @@ -116,6 +121,7 @@ export const Slider = ({ onBlur={onSliderInputBlur} min={min} max={max} + id={inputId} />
diff --git a/packages/grafana-ui/src/components/Slider/types.ts b/packages/grafana-ui/src/components/Slider/types.ts index 2551ac84937..812a8bed7d7 100644 --- a/packages/grafana-ui/src/components/Slider/types.ts +++ b/packages/grafana-ui/src/components/Slider/types.ts @@ -21,6 +21,7 @@ export interface SliderProps extends CommonSliderProps { onAfterChange?: (value?: number) => void; formatTooltipResult?: (value: number) => number; ariaLabelForHandle?: string; + inputId: string; } export interface RangeSliderProps extends CommonSliderProps { diff --git a/packages/grafana-ui/src/components/Table/TableNG/Filter/Filter.tsx b/packages/grafana-ui/src/components/Table/TableNG/Filter/Filter.tsx index 2f219a4dd06..3c00ba45699 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/Filter/Filter.tsx +++ b/packages/grafana-ui/src/components/Table/TableNG/Filter/Filter.tsx @@ -14,9 +14,9 @@ import { FilterPopup } from './FilterPopup'; interface Props { name: string; - rows: any[]; + rows: TableRow[]; filter: FilterType; - setFilter: (value: FilterType) => void; + setFilter: React.Dispatch>; field?: Field; crossFilterOrder: string[]; crossFilterRows: { [key: string]: TableRow[] }; diff --git a/packages/grafana-ui/src/components/Table/TableNG/Filter/FilterPopup.tsx b/packages/grafana-ui/src/components/Table/TableNG/Filter/FilterPopup.tsx index 5e856d26a05..de258a44815 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/Filter/FilterPopup.tsx +++ b/packages/grafana-ui/src/components/Table/TableNG/Filter/FilterPopup.tsx @@ -1,5 +1,5 @@ import { css } from '@emotion/css'; -import React, { useCallback, useMemo, useState } from 'react'; +import React, { useCallback, useMemo, useRef, useState } from 'react'; import { Field, GrafanaTheme2, SelectableValue } from '@grafana/data'; import { selectors } from '@grafana/e2e-selectors'; @@ -12,7 +12,7 @@ import { ButtonSelect } from '../../../Dropdown/ButtonSelect'; import { FilterInput } from 
'../../../FilterInput/FilterInput'; import { Label } from '../../../Forms/Label'; import { Stack } from '../../../Layout/Stack/Stack'; -import { FilterType } from '../types'; +import { FilterType, TableRow } from '../types'; import { getDisplayName } from '../utils'; import { FilterList } from './FilterList'; @@ -36,9 +36,9 @@ const OPERATORS = Object.values(operatorSelectableValues); interface Props { name: string; - rows: any[]; - filterValue: any; - setFilter: (value: any) => void; + rows: TableRow[]; + filterValue?: Array>; + setFilter: React.Dispatch>; onClose: () => void; field?: Field; searchFilter: string; @@ -65,6 +65,7 @@ export const FilterPopup = ({ const filteredOptions = useMemo(() => getFilteredOptions(options, filterValue), [options, filterValue]); const [values, setValues] = useState(filteredOptions); const [matchCase, setMatchCase] = useState(false); + const containerRef = useRef(null); const onCancel = useCallback((event?: React.MouseEvent) => onClose(), [onClose]); @@ -114,6 +115,7 @@ export const FilterPopup = ({ className={styles.filterContainer} onClick={stopPropagation} data-testid={selectors.components.Panels.Visualization.TableNG.Filters.Container} + ref={containerRef} > @@ -124,6 +126,7 @@ export const FilterPopup = ({ onChange={setOperator} value={operator} tooltip={operator.description} + root={containerRef.current ?? 
undefined} /> diff --git a/packages/grafana-ui/src/components/Table/TableNG/Filter/utils.ts b/packages/grafana-ui/src/components/Table/TableNG/Filter/utils.ts index 862ba9856c6..587add826c5 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/Filter/utils.ts +++ b/packages/grafana-ui/src/components/Table/TableNG/Filter/utils.ts @@ -1,8 +1,9 @@ import { Field, formattedValueToString, SelectableValue } from '@grafana/data'; +import { TableRow } from '../types'; import { getDisplayName } from '../utils'; -export function calculateUniqueFieldValues(rows: any[], field?: Field) { +export function calculateUniqueFieldValues(rows: TableRow[], field?: Field) { if (!field || rows.length === 0) { return {}; } @@ -12,8 +13,7 @@ export function calculateUniqueFieldValues(rows: any[], field?: Field) { for (let index = 0; index < rows.length; index++) { const row = rows[index]; const fieldValue = row[getDisplayName(field)]; - const displayValue = field.display ? field.display(fieldValue) : fieldValue; - const value = field.display ? formattedValueToString(displayValue) : displayValue; + const value = field.display ? 
formattedValueToString(field.display(fieldValue)) : String(fieldValue); set[value || '(Blanks)'] = value; } diff --git a/packages/grafana-ui/src/components/Table/TableNG/TableNG.tsx b/packages/grafana-ui/src/components/Table/TableNG/TableNG.tsx index c45a44eac8d..17c54fde979 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/TableNG.tsx +++ b/packages/grafana-ui/src/components/Table/TableNG/TableNG.tsx @@ -97,6 +97,7 @@ import { withDataLinksActionsTooltip, getSummaryCellTextAlign, parseStyleJson, + IS_SAFARI_26, } from './utils'; const EXPANDED_COLUMN_KEY = 'expanded'; @@ -287,7 +288,7 @@ export function TableNG(props: TableNGProps) { const commonDataGridProps = useMemo( () => ({ - enableVirtualization: enableVirtualization !== false && rowHeight !== 'auto', + enableVirtualization: !IS_SAFARI_26 && enableVirtualization !== false && rowHeight !== 'auto', defaultColumnOptions: { minWidth: 50, resizable: true, @@ -460,7 +461,8 @@ export function TableNG(props: TableNGProps) { ? clsx('table-cell-actions', getCellActionStyles(theme, textAlign)) : undefined; - const shouldOverflow = rowHeight !== 'auto' && (shouldTextOverflow(field) || Boolean(maxRowHeight)); + const shouldOverflow = + !IS_SAFARI_26 && rowHeight !== 'auto' && (shouldTextOverflow(field) || Boolean(maxRowHeight)); const textWrap = rowHeight === 'auto' || shouldTextWrap(field); const withTooltip = withDataLinksActionsTooltip(field, cellType); const canBeColorized = canFieldBeColorized(cellType, applyToRowBgFn); @@ -789,7 +791,7 @@ export function TableNG(props: TableNGProps) { const displayedEnd = pageRangeEnd; const numRows = sortedRows.length; - return ( + let rendered = ( <> {...commonDataGridProps} @@ -877,6 +879,12 @@ export function TableNG(props: TableNGProps) { )} ); + + if (IS_SAFARI_26) { + rendered =
{rendered}
; + } + + return rendered; } /** diff --git a/packages/grafana-ui/src/components/Table/TableNG/hooks.ts b/packages/grafana-ui/src/components/Table/TableNG/hooks.ts index cc648af019a..83c320dacbf 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/hooks.ts +++ b/packages/grafana-ui/src/components/Table/TableNG/hooks.ts @@ -16,6 +16,7 @@ import { computeColWidths, buildHeaderHeightMeasurers, buildCellHeightMeasurers, + IS_SAFARI_26, } from './utils'; // Helper function to get displayed value @@ -468,7 +469,7 @@ export function useScrollbarWidth(ref: RefObject, height: number useLayoutEffect(() => { const el = ref.current?.element; - if (!el) { + if (!el || IS_SAFARI_26) { return; } diff --git a/packages/grafana-ui/src/components/Table/TableNG/styles.ts b/packages/grafana-ui/src/components/Table/TableNG/styles.ts index feb5e650d90..7bc859dcd62 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/styles.ts +++ b/packages/grafana-ui/src/components/Table/TableNG/styles.ts @@ -1,11 +1,12 @@ import { css } from '@emotion/css'; import { Property } from 'csstype'; +import memoize from 'micro-memoize'; import { GrafanaTheme2, colorManipulator } from '@grafana/data'; import { COLUMN, TABLE } from './constants'; import { TableCellStyles } from './types'; -import { getJustifyContent, TextAlign } from './utils'; +import { getJustifyContent, IS_SAFARI_26, TextAlign } from './utils'; export const getGridStyles = (theme: GrafanaTheme2, enablePagination?: boolean, transparent?: boolean) => { const bgColor = transparent ? theme.colors.background.canvas : theme.colors.background.primary; @@ -51,14 +52,14 @@ export const getGridStyles = (theme: GrafanaTheme2, enablePagination?: boolean, '& > :not(.rdg-summary-row, .rdg-header-row) > .rdg-cell': { [getActiveCellSelector()]: { boxShadow: theme.shadows.z2 }, // selected cells should appear below hovered cells. 
- '&:hover': { zIndex: theme.zIndex.tooltip - 7 }, + ...(!IS_SAFARI_26 && { '&:hover': { zIndex: theme.zIndex.tooltip - 7 } }), '&[aria-selected=true]': { zIndex: theme.zIndex.tooltip - 6 }, }, '.rdg-cell.rdg-cell-frozen': { - backgroundColor: '--rdg-row-background-color', + backgroundColor: 'var(--rdg-row-background-color)', zIndex: theme.zIndex.tooltip - 4, - '&:hover': { zIndex: theme.zIndex.tooltip - 2 }, + ...(!IS_SAFARI_26 && { '&:hover': { zIndex: theme.zIndex.tooltip - 2 } }), '&[aria-selected=true]': { zIndex: theme.zIndex.tooltip - 3 }, }, @@ -70,7 +71,6 @@ export const getGridStyles = (theme: GrafanaTheme2, enablePagination?: boolean, }, }, }, - '.rdg-summary-row >': { '.rdg-cell': { // 0.75 padding causes "jumping" on hover. @@ -123,6 +123,7 @@ export const getGridStyles = (theme: GrafanaTheme2, enablePagination?: boolean, padding: theme.spacing(0, 1, 0, 2), }), menuItem: css({ maxWidth: '200px' }), + safariWrapper: css({ contain: 'strict', height: '100%' }), }; }; @@ -232,5 +233,22 @@ export const getTooltipStyles = (theme: GrafanaTheme2, textAlign: TextAlign) => }), }); -export const getActiveCellSelector = (isNested?: boolean) => - isNested ? '.rdg-cell:hover &, [aria-selected=true] &' : '&:hover, &[aria-selected=true]'; +const ACTIVE_CELL_SELECTORS = { + hover: { + nested: '.rdg-cell:hover &', + normal: '&:hover', + }, + selected: { + nested: '[aria-selected=true] &', + normal: '&[aria-selected=true]', + }, +} as const; + +export const getActiveCellSelector = memoize((isNested?: boolean) => { + const selectors = []; + selectors.push(ACTIVE_CELL_SELECTORS.selected[isNested ? 'nested' : 'normal']); + if (!IS_SAFARI_26) { + selectors.push(ACTIVE_CELL_SELECTORS.hover[isNested ? 
'nested' : 'normal']); + } + return selectors.join(', '); +}); diff --git a/packages/grafana-ui/src/components/Table/TableNG/utils.ts b/packages/grafana-ui/src/components/Table/TableNG/utils.ts index a133fe883b0..848f21957b0 100644 --- a/packages/grafana-ui/src/components/Table/TableNG/utils.ts +++ b/packages/grafana-ui/src/components/Table/TableNG/utils.ts @@ -1029,3 +1029,13 @@ export function parseStyleJson(rawValue: unknown): CSSProperties | void { } } } + +// Safari 26 introduced rendering bugs which require us to disable several features of the table. +export const IS_SAFARI_26 = (() => { + if (navigator == null) { + return false; + } + const userAgent = navigator.userAgent; + const safariVersionMatch = userAgent.match(/Version\/(\d+)\./); + return safariVersionMatch && parseInt(safariVersionMatch[1], 10) === 26; +})(); diff --git a/pkg/aggregator/go.mod b/pkg/aggregator/go.mod index f7bee6dfc48..78ebfa0719e 100644 --- a/pkg/aggregator/go.mod +++ b/pkg/aggregator/go.mod @@ -153,7 +153,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/pkg/aggregator/go.sum b/pkg/aggregator/go.sum index 9830f7d5659..c197e46cd0f 100644 --- a/pkg/aggregator/go.sum +++ b/pkg/aggregator/go.sum @@ -445,8 +445,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go. 
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/api/admin_users.go b/pkg/api/admin_users.go index 64453f86688..84f4b7b4ae3 100644 --- a/pkg/api/admin_users.go +++ b/pkg/api/admin_users.go @@ -17,6 +17,7 @@ import ( contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" "github.com/grafana/grafana/pkg/services/login" "github.com/grafana/grafana/pkg/services/org" + pref "github.com/grafana/grafana/pkg/services/preference" "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/web" ) @@ -222,7 +223,7 @@ func (hs *HTTPServer) AdminDeleteUser(c *contextmodel.ReqContext) response.Respo return nil }) g.Go(func() error { - if err := hs.preferenceService.DeleteByUser(ctx, cmd.UserID); err != nil { + if err := hs.preferenceService.Delete(ctx, &pref.DeleteCommand{UserID: cmd.UserID}); err != nil { return err } return nil diff --git a/pkg/api/api.go b/pkg/api/api.go index 87cf69d966d..4710056875d 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -125,11 +125,6 @@ func (hs *HTTPServer) registerRoutes() { r.Get("/admin/migrate-to-cloud", 
authorize(cloudmigration.MigrationAssistantAccess), hs.Index) } - // feature toggle admin page - if hs.Features.IsEnabledGlobally(featuremgmt.FlagFeatureToggleAdminPage) { - r.Get("/admin/featuretoggles", authorize(ac.EvalPermission(ac.ActionFeatureManagementRead)), hs.Index) - } - // secrets management page if hs.Features.IsEnabledGlobally(featuremgmt.FlagSecretsManagementAppPlatform) && hs.Features.IsEnabledGlobally(featuremgmt.FlagSecretsManagementAppPlatformUI) { r.Get("/admin/secrets", authorize(ac.EvalAny( diff --git a/pkg/api/apierrors/dashboard.go b/pkg/api/apierrors/dashboard.go index b3d11f2f0d8..84584684b50 100644 --- a/pkg/api/apierrors/dashboard.go +++ b/pkg/api/apierrors/dashboard.go @@ -17,8 +17,9 @@ import ( // ToDashboardErrorResponse returns a different response status according to the dashboard error type func ToDashboardErrorResponse(ctx context.Context, pluginStore pluginstore.Store, err error) response.Response { + // --- Dashboard errors --- var dashboardErr dashboardaccess.DashboardErr - if ok := errors.As(err, &dashboardErr); ok { + if errors.As(err, &dashboardErr) { if body := dashboardErr.Body(); body != nil { return response.JSON(dashboardErr.StatusCode, body) } @@ -28,28 +29,32 @@ func ToDashboardErrorResponse(ctx context.Context, pluginStore pluginstore.Store return response.Error(dashboardErr.StatusCode, dashboardErr.Error(), nil) } + // --- 400 Bad Request --- if errors.Is(err, dashboards.ErrFolderNotFound) { return response.Error(http.StatusBadRequest, err.Error(), nil) } var pluginErr dashboards.UpdatePluginDashboardError - if ok := errors.As(err, &pluginErr); ok { + if errors.As(err, &pluginErr) { message := fmt.Sprintf("The dashboard belongs to plugin %s.", pluginErr.PluginId) // look up plugin name if plugin, exists := pluginStore.Plugin(ctx, pluginErr.PluginId); exists { message = fmt.Sprintf("The dashboard belongs to plugin %s.", plugin.Name) } + // --- 412 Precondition Failed --- return 
response.JSON(http.StatusPreconditionFailed, util.DynMap{"status": "plugin-dashboard", "message": message}) } + // --- 413 Payload Too Large --- if apierrors.IsRequestEntityTooLargeError(err) { return response.Error(http.StatusRequestEntityTooLarge, fmt.Sprintf("Dashboard is too large, max is %d MB", apiserver.MaxRequestBodyBytes/1024/1024), err) } + // --- Kubernetes status errors --- var statusErr *apierrors.StatusError if errors.As(err, &statusErr) { return response.Error(int(statusErr.ErrStatus.Code), statusErr.ErrStatus.Message, err) } - return response.Error(http.StatusInternalServerError, "Failed to save dashboard", err) + return response.ErrOrFallback(http.StatusInternalServerError, fmt.Sprintf("Dashboard API error: %s", err.Error()), err) } diff --git a/pkg/api/apierrors/dashboard_test.go b/pkg/api/apierrors/dashboard_test.go new file mode 100644 index 00000000000..1e1512e07d3 --- /dev/null +++ b/pkg/api/apierrors/dashboard_test.go @@ -0,0 +1,128 @@ +package apierrors + +import ( + "context" + "errors" + "fmt" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/grafana/grafana/pkg/api/response" + "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/services/apiserver" + "github.com/grafana/grafana/pkg/services/dashboards" + "github.com/grafana/grafana/pkg/services/dashboards/dashboardaccess" + "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore" + "github.com/grafana/grafana/pkg/util" +) + +type fakePluginStore struct { + pluginstore.Store + plugins map[string]pluginstore.Plugin +} + +func (f *fakePluginStore) Plugin(_ context.Context, id string) (pluginstore.Plugin, bool) { + p, ok := f.plugins[id] + return p, ok +} + +func TestToDashboardErrorResponse(t *testing.T) { + pluginStoreWithPlugin := &fakePluginStore{ + plugins: map[string]pluginstore.Plugin{ + "test-plugin": {JSONData: 
plugins.JSONData{Name: "Test Plugin"}}, + }, + } + pluginStoreWithoutPlugin := &fakePluginStore{ + plugins: map[string]pluginstore.Plugin{}, + } + + tests := []struct { + name string + pluginStore pluginstore.Store + input error + want response.Response + }{ + // --- 400 Bad Request --- + { + name: "dashboard error with a bad-request status", + pluginStore: pluginStoreWithoutPlugin, + input: dashboardaccess.DashboardErr{Reason: "Bad Request", StatusCode: http.StatusBadRequest}, + want: response.Error(http.StatusBadRequest, "Bad Request", nil), + }, + // --- 403 Forbidden --- + { + name: "dashboard error with a forbidden status", + pluginStore: pluginStoreWithoutPlugin, + input: &k8sErrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusForbidden, Message: "access denied"}}, + want: response.Error(http.StatusForbidden, "access denied", &k8sErrors.StatusError{ErrStatus: metav1.Status{Code: http.StatusForbidden, Message: "access denied"}}), + }, + // --- 404 Not Found --- + { + name: "folder not found error", + pluginStore: pluginStoreWithoutPlugin, + input: dashboards.ErrFolderNotFound, + want: response.Error(http.StatusBadRequest, dashboards.ErrFolderNotFound.Error(), nil), + }, + { + name: "dashboard error with a non-bad-request status", + pluginStore: pluginStoreWithoutPlugin, + input: dashboardaccess.DashboardErr{Reason: "Not Found", StatusCode: http.StatusNotFound}, + want: response.Error(http.StatusNotFound, "Not Found", dashboardaccess.DashboardErr{Reason: "Not Found", StatusCode: http.StatusNotFound}), + }, + { + name: "plugin dashboard error where plugin is found", + pluginStore: pluginStoreWithPlugin, + input: dashboards.UpdatePluginDashboardError{PluginId: "test-plugin"}, + want: response.JSON(http.StatusPreconditionFailed, util.DynMap{"status": "plugin-dashboard", "message": "The dashboard belongs to plugin Test Plugin."}), + }, + // --- 412 Precondition Failed --- + { + name: "plugin dashboard error where plugin is not found", + pluginStore: 
pluginStoreWithoutPlugin, + input: dashboards.UpdatePluginDashboardError{PluginId: "unknown-plugin"}, + want: response.JSON(http.StatusPreconditionFailed, util.DynMap{"status": "plugin-dashboard", "message": "The dashboard belongs to plugin unknown-plugin."}), + }, + // --- 413 Payload Too Large --- + { + name: "request entity too large error", + pluginStore: pluginStoreWithoutPlugin, + input: k8sErrors.NewRequestEntityTooLargeError("request is too large"), + want: response.Error(http.StatusRequestEntityTooLarge, fmt.Sprintf("Dashboard is too large, max is %d MB", apiserver.MaxRequestBodyBytes/1024/1024), k8sErrors.NewRequestEntityTooLargeError("request is too large")), + }, + // --- Kubernetes status errors --- + { + name: "kubernetes status error", + pluginStore: pluginStoreWithoutPlugin, + input: &k8sErrors.StatusError{ + ErrStatus: metav1.Status{ + Code: 412, + Message: "the dashboard has been changed by someone else", + }, + }, + want: response.Error(412, "the dashboard has been changed by someone else", &k8sErrors.StatusError{ + ErrStatus: metav1.Status{ + Code: 412, + Message: "the dashboard has been changed by someone else", + }, + }), + }, + // --- 500 Internal Server Error --- + { + name: "fallback error for an unknown error", + pluginStore: pluginStoreWithoutPlugin, + input: errors.New("an unexpected error"), + want: response.Error(http.StatusInternalServerError, "Dashboard API error: an unexpected error", errors.New("an unexpected error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res := ToDashboardErrorResponse(context.Background(), tt.pluginStore, tt.input) + require.Equal(t, tt.want, res) + }) + } +} diff --git a/pkg/api/apierrors/folder.go b/pkg/api/apierrors/folder.go index 7ed21c04fb5..81b16c3899e 100644 --- a/pkg/api/apierrors/folder.go +++ b/pkg/api/apierrors/folder.go @@ -3,6 +3,7 @@ package apierrors import ( "encoding/json" "errors" + "fmt" "net/http" k8sErrors "k8s.io/apimachinery/pkg/api/errors" @@ 
-17,45 +18,49 @@ import ( // ToFolderErrorResponse returns a different response status according to the folder error type func ToFolderErrorResponse(err error) response.Response { + // --- Dashboard errors --- var dashboardErr dashboardaccess.DashboardErr if ok := errors.As(err, &dashboardErr); ok { return response.Error(dashboardErr.StatusCode, err.Error(), err) } + // --- 400 Bad Request --- if errors.Is(err, dashboards.ErrFolderTitleEmpty) || errors.Is(err, dashboards.ErrDashboardTypeMismatch) || errors.Is(err, dashboards.ErrDashboardInvalidUid) || - errors.Is(err, dashboards.ErrDashboardUidTooLong) { + errors.Is(err, dashboards.ErrDashboardUidTooLong) || + errors.Is(err, folder.ErrFolderCannotBeParentOfItself) { return response.Error(http.StatusBadRequest, err.Error(), nil) } + // --- 403 Forbidden --- if errors.Is(err, dashboards.ErrFolderAccessDenied) { return response.Error(http.StatusForbidden, "Access denied", err) } + // --- 404 Not Found --- if errors.Is(err, dashboards.ErrFolderNotFound) { return response.JSON(http.StatusNotFound, util.DynMap{"status": "not-found", "message": dashboards.ErrFolderNotFound.Error()}) } + // --- 409 Conflict --- if errors.Is(err, dashboards.ErrFolderWithSameUIDExists) { return response.Error(http.StatusConflict, err.Error(), nil) } + // --- 412 Precondition Failed --- if errors.Is(err, dashboards.ErrFolderVersionMismatch) || k8sErrors.IsAlreadyExists(err) { return response.JSON(http.StatusPreconditionFailed, util.DynMap{"status": "version-mismatch", "message": dashboards.ErrFolderVersionMismatch.Error()}) } - if errors.Is(err, folder.ErrMaximumDepthReached) { - return response.JSON(http.StatusBadRequest, util.DynMap{"messageId": "folder.maximum-depth-reached", "message": folder.ErrMaximumDepthReached.Error()}) - } - + // --- Kubernetes status errors --- var statusErr *k8sErrors.StatusError if errors.As(err, &statusErr) { return response.Error(int(statusErr.ErrStatus.Code), statusErr.ErrStatus.Message, err) } - return 
response.ErrOrFallback(http.StatusInternalServerError, "Folder API error", err) + return response.ErrOrFallback(http.StatusInternalServerError, fmt.Sprintf("Folder API error: %s", err.Error()), err) } func ToFolderStatusError(err error) k8sErrors.StatusError { diff --git a/pkg/api/apierrors/folder_test.go b/pkg/api/apierrors/folder_test.go index a1a02c1040e..2def7fb48a4 100644 --- a/pkg/api/apierrors/folder_test.go +++ b/pkg/api/apierrors/folder_test.go @@ -21,61 +21,109 @@ func TestToFolderErrorResponse(t *testing.T) { input error want response.Response }{ + // --- 400 Bad Request --- { name: "dashboard error", - input: dashboardaccess.DashboardErr{StatusCode: 400, Reason: "Dashboard Error", Status: "error"}, - want: response.Error(400, "Dashboard Error", dashboardaccess.DashboardErr{StatusCode: 400, Reason: "Dashboard Error", Status: "error"}), + input: dashboardaccess.DashboardErr{StatusCode: http.StatusBadRequest, Reason: "Dashboard Error", Status: "error"}, + want: response.Error(http.StatusBadRequest, "Dashboard Error", dashboardaccess.DashboardErr{StatusCode: http.StatusBadRequest, Reason: "Dashboard Error", Status: "error"}), + }, + { + name: "maximum depth reached", + input: folder.ErrMaximumDepthReached.Errorf("Maximum nested folder depth reached"), + want: response.Err(folder.ErrMaximumDepthReached.Errorf("Maximum nested folder depth reached")), + }, + { + name: "bad request errors", + input: folder.ErrBadRequest.Errorf("Bad request error"), + want: response.Err(folder.ErrBadRequest.Errorf("Bad request error")), + }, + { + name: "conflict error", + input: folder.ErrConflict.Errorf("Conflict error"), + want: response.Err(folder.ErrConflict.Errorf("Conflict error")), + }, + { + name: "circular reference error", + input: folder.ErrCircularReference.Errorf("Circular reference detected"), + want: response.Err(folder.ErrCircularReference.Errorf("Circular reference detected")), + }, + + { + name: "folder not empty error", + input: 
folder.ErrFolderNotEmpty.Errorf("Folder cannot be deleted: folder is not empty"), + want: response.Err(folder.ErrFolderNotEmpty.Errorf("Folder cannot be deleted: folder is not empty")), }, { name: "folder title empty", input: dashboards.ErrFolderTitleEmpty, - want: response.Error(400, "folder title cannot be empty", nil), + want: response.Error(http.StatusBadRequest, "folder title cannot be empty", nil), }, { name: "dashboard type mismatch", input: dashboards.ErrDashboardTypeMismatch, - want: response.Error(400, "Dashboard cannot be changed to a folder", dashboards.ErrDashboardTypeMismatch), + want: response.Error(http.StatusBadRequest, "Dashboard cannot be changed to a folder", dashboards.ErrDashboardTypeMismatch), }, { name: "dashboard invalid uid", input: dashboards.ErrDashboardInvalidUid, - want: response.Error(400, "uid contains illegal characters", dashboards.ErrDashboardInvalidUid), + want: response.Error(http.StatusBadRequest, "uid contains illegal characters", dashboards.ErrDashboardInvalidUid), }, { name: "dashboard uid too long", input: dashboards.ErrDashboardUidTooLong, - want: response.Error(400, "uid too long, max 40 characters", dashboards.ErrDashboardUidTooLong), + want: response.Error(http.StatusBadRequest, "uid too long, max 40 characters", dashboards.ErrDashboardUidTooLong), }, + { + name: "folder cannot be parent of itself", + input: folder.ErrFolderCannotBeParentOfItself, + want: response.Error(http.StatusBadRequest, folder.ErrFolderCannotBeParentOfItself.Error(), nil), + }, + // --- 403 Forbidden --- { name: "folder access denied", input: dashboards.ErrFolderAccessDenied, want: response.Error(http.StatusForbidden, "Access denied", dashboards.ErrFolderAccessDenied), }, + // --- 404 Not Found --- { name: "folder not found", input: dashboards.ErrFolderNotFound, want: response.JSON(http.StatusNotFound, util.DynMap{"status": "not-found", "message": dashboards.ErrFolderNotFound.Error()}), }, + // --- 409 Conflict --- { name: "folder with same uid 
exists", input: dashboards.ErrFolderWithSameUIDExists, want: response.Error(http.StatusConflict, dashboards.ErrFolderWithSameUIDExists.Error(), nil), }, + // --- 412 Precondition Failed --- { name: "folder version mismatch", input: dashboards.ErrFolderVersionMismatch, want: response.JSON(http.StatusPreconditionFailed, util.DynMap{"status": "version-mismatch", "message": dashboards.ErrFolderVersionMismatch.Error()}), }, + // --- 500 Internal Server Error --- { - name: "folder max depth reached", - input: folder.ErrMaximumDepthReached, - want: response.JSON(http.StatusBadRequest, util.DynMap{"messageId": "folder.maximum-depth-reached", "message": folder.ErrMaximumDepthReached.Error()}), + name: "target registry srv conflict error", + input: folder.ErrTargetRegistrySrvConflict.Errorf("Target registry service conflict"), + want: response.Err(folder.ErrTargetRegistrySrvConflict.Errorf("Target registry service conflict")), }, { - name: "fallback error", - input: errors.New("some error"), - want: response.ErrOrFallback(http.StatusInternalServerError, "Folder API error", errors.New("some error")), + name: "internal error", + input: folder.ErrInternal.Errorf("Internal error"), + want: response.Err(folder.ErrInternal.Errorf("Internal error")), }, + { + name: "database error", + input: folder.ErrDatabaseError.Errorf("Database error"), + want: response.Err(folder.ErrDatabaseError.Errorf("Database error")), + }, + { + name: "fallback error for an unknown error", + input: errors.New("an unexpected error"), + want: response.Error(http.StatusInternalServerError, "Folder API error: an unexpected error", errors.New("an unexpected error")), + }, + // --- Kubernetes status errors --- { name: "kubernetes status error", input: &k8sErrors.StatusError{ diff --git a/pkg/api/plugins_test.go b/pkg/api/plugins_test.go index 4409395ebe2..cf8a2eca399 100644 --- a/pkg/api/plugins_test.go +++ b/pkg/api/plugins_test.go @@ -12,19 +12,18 @@ import ( "strings" "testing" + 
"github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/grafana/grafana-plugin-sdk-go/backend" - "github.com/grafana/grafana/pkg/plugins/auth" - "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/log/logtest" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/plugins/auth" "github.com/grafana/grafana/pkg/plugins/config" "github.com/grafana/grafana/pkg/plugins/manager/fakes" "github.com/grafana/grafana/pkg/plugins/manager/filestore" @@ -528,9 +527,12 @@ func callGetPluginAsset(sc *scenarioContext) { func pluginAssetScenario(t *testing.T, desc string, url string, urlPattern string, cfg *setting.Cfg, pluginRegistry registry.Service, fn scenarioFunc) { t.Run(fmt.Sprintf("%s %s", desc, url), func(t *testing.T) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs := HTTPServer{ Cfg: cfg, - pluginStore: pluginstore.New(pluginRegistry, &fakes.FakeLoader{}), + pluginStore: store, pluginFileStore: filestore.ProvideService(pluginRegistry), log: log.NewNopLogger(), pluginsCDNService: pluginscdn.ProvideService(&config.PluginManagementCfg{ @@ -640,12 +642,14 @@ func Test_PluginsList_AccessControl(t *testing.T) { for _, tc := range tcs { t.Run(tc.desc, func(t *testing.T) { server := SetupAPITestServer(t, func(hs *HTTPServer) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs.Cfg = setting.NewCfg() hs.PluginSettings = &pluginSettings - hs.pluginStore = pluginstore.New(pluginRegistry, &fakes.FakeLoader{}) + hs.pluginStore = store hs.pluginFileStore = 
filestore.ProvideService(pluginRegistry) hs.managedPluginsService = managedplugins.NewNoop() - var err error hs.pluginsUpdateChecker, err = updatemanager.ProvidePluginsService( hs.Cfg, hs.pluginStore, @@ -828,9 +832,12 @@ func Test_PluginsSettings(t *testing.T) { for _, tc := range tcs { t.Run(tc.desc, func(t *testing.T) { server := SetupAPITestServer(t, func(hs *HTTPServer) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs.Cfg = setting.NewCfg() hs.PluginSettings = &pluginSettings - hs.pluginStore = pluginstore.New(pluginRegistry, &fakes.FakeLoader{}) + hs.pluginStore = store hs.pluginFileStore = filestore.ProvideService(pluginRegistry) errTracker := pluginerrs.ProvideErrorTracker() if tc.errCode != "" { @@ -844,7 +851,6 @@ func Test_PluginsSettings(t *testing.T) { sig := signature.ProvideService(pCfg, statickey.New()) hs.pluginAssets = pluginassets.ProvideService(pCfg, pluginCDN, sig, hs.pluginStore) hs.pluginErrorResolver = pluginerrs.ProvideStore(errTracker) - var err error hs.pluginsUpdateChecker, err = updatemanager.ProvidePluginsService( hs.Cfg, hs.pluginStore, @@ -896,9 +902,12 @@ func Test_UpdatePluginSetting(t *testing.T) { t.Run("should return an error when trying to disable an auto-enabled plugin", func(t *testing.T) { server := SetupAPITestServer(t, func(hs *HTTPServer) { + store, err := pluginstore.NewPluginStoreForTest(pluginRegistry, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + hs.Cfg = setting.NewCfg() hs.PluginSettings = &pluginSettings - hs.pluginStore = pluginstore.New(pluginRegistry, &fakes.FakeLoader{}) + hs.pluginStore = store hs.pluginFileStore = filestore.ProvideService(pluginRegistry) hs.managedPluginsService = managedplugins.NewNoop() hs.log = log.NewNopLogger() diff --git a/pkg/api/user.go b/pkg/api/user.go index 3d84e2cffd6..311f80e49ae 100644 --- a/pkg/api/user.go +++ b/pkg/api/user.go @@ -117,6 +117,7 @@ 
func (hs *HTTPServer) GetUserByLoginOrEmail(c *contextmodel.ReqContext) response } result := user.UserProfileDTO{ ID: usr.ID, + UID: usr.UID, Name: usr.Name, Email: usr.Email, Login: usr.Login, diff --git a/pkg/apimachinery/go.mod b/pkg/apimachinery/go.mod index 9551e229fd4..54f7a42a362 100644 --- a/pkg/apimachinery/go.mod +++ b/pkg/apimachinery/go.mod @@ -3,8 +3,8 @@ module github.com/grafana/grafana/pkg/apimachinery go 1.24.6 require ( - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // @grafana/identity-access-team - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 // @grafana/identity-access-team + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team github.com/stretchr/testify v1.11.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/apimachinery v0.34.1 @@ -51,7 +51,7 @@ require ( golang.org/x/text v0.29.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect diff --git a/pkg/apimachinery/go.sum b/pkg/apimachinery/go.sum index bc20da45e99..5479f1831c1 100644 --- a/pkg/apimachinery/go.sum +++ b/pkg/apimachinery/go.sum @@ -30,10 +30,10 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= 
-github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -132,8 +132,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/apimachinery/validation/validation.go b/pkg/apimachinery/validation/validation.go new file mode 100644 index 00000000000..777eb59fa46 --- /dev/null +++ b/pkg/apimachinery/validation/validation.go @@ -0,0 +1,105 @@ +package validation + +import ( + "regexp" + + "k8s.io/apimachinery/pkg/util/validation" +) + +const maxNameLength = 253 +const maxNamespaceLength = 40 +const minNamespaceLength = 3 +const maxGroupLength = 60 +const minGroupLength = 3 +const maxResourceLength = 40 +const minResourceLength = 3 + +const grafanaNameFmt = `^[a-zA-Z0-9:\-\_\.]*$` +const grafanaNameErrMsg string = "must consist of alphanumeric characters, '-', '_', ':' or '.'" + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "^(" + qnameCharFmt + qnameExtCharFmt + "*)?" 
+ qnameCharFmt + "$" +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +const alphaCharFmt string = "[A-Za-z]" +const resourceCharFmt string = "[A-Za-z0-9-]" // alpha numeric plus dashes +const resourceFmt string = "^" + alphaCharFmt + resourceCharFmt + "*$" +const resourceErrMsg string = "must consist of alphanumeric characters and dashes, and must start with an alphabetic character" + +var ( + grafanaNameRegexp = regexp.MustCompile(grafanaNameFmt).MatchString + qualifiedNameRegexp = regexp.MustCompile(qualifiedNameFmt).MatchString + resourceRegexp = regexp.MustCompile(resourceFmt).MatchString +) + +// IsValidGrafanaName checks if the name is a valid to use for a k8s name +// Unlike normal k8s name rules, this allows the name to start with a digit +// This compromise means existing grafana UIDs are valid k8s names without migration +func IsValidGrafanaName(name string) []string { + s := len(name) + switch { + case s == 0: + return []string{"name may not be empty"} + case s > maxNameLength: + return []string{"name is too long"} + } + + if !grafanaNameRegexp(name) { + return []string{"name " + validation.RegexError(grafanaNameErrMsg, grafanaNameFmt, "MyName", "my.name", "abc-123")} + } + // In standard k8s, it must not start with a number + // however that would force us to update many many many existing resources + // so we will be slightly more lenient than standard k8s + return nil +} + +// If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. 
+func IsValidNamespace(namespace string) []string { + s := len(namespace) + switch { + case s == 0: + return nil // empty is OK + case s > maxNamespaceLength: + return []string{"namespace is too long"} + case s < minNamespaceLength: + return []string{"namespace is too short"} + } + if !qualifiedNameRegexp(namespace) { + return []string{"namespace " + validation.RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "abc-123")} + } + return nil +} + +// If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. +func IsValidGroup(group string) []string { + s := len(group) + switch { + case s > maxGroupLength: + return []string{"group is too long"} + case s < minGroupLength: + return []string{"group is too short"} + } + if !qualifiedNameRegexp(group) { + return []string{"group " + validation.RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "dashboards.grafana.app", "grafana-loki-datasource")} + } + return nil +} + +// If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. 
+func IsValidateResource(resource string) []string { + s := len(resource) + switch { + case s > maxResourceLength: + return []string{"resource is too long"} + case s < minResourceLength: + return []string{"resource is too short"} + } + if !resourceRegexp(resource) { + return []string{"resource " + validation.RegexError(resourceErrMsg, resourceFmt, "dashboards", "folders")} + } + return nil +} diff --git a/pkg/apimachinery/validation/validation_test.go b/pkg/apimachinery/validation/validation_test.go new file mode 100644 index 00000000000..3bb5fa61ad9 --- /dev/null +++ b/pkg/apimachinery/validation/validation_test.go @@ -0,0 +1,231 @@ +package validation_test + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" + k8sValidation "k8s.io/apimachinery/pkg/util/validation" + + "github.com/grafana/grafana/pkg/apimachinery/validation" +) + +func TestValidation(t *testing.T) { + // We are not using the out-of-the-box "isQualifiedName" because it allows slashes + rsp := k8sValidation.IsQualifiedName("hello/world") + require.Nil(t, rsp, "standard qualified name allows a slash") + + t.Run("name", func(t *testing.T) { + tests := []struct { + name string + input []string // variations that produce the same output + expect []string + }{{ + name: "empty", + input: []string{""}, + expect: []string{"name may not be empty"}, + }, { + name: "too long", + input: []string{strings.Repeat("0", 254)}, + expect: []string{"name is too long"}, + }, { + name: "ok", + input: []string{ + "hello", + strings.Repeat("0", 253), // very long starts with number + "hello-world", + "hello.world", + "hello_world", + "hello:world", + "123456", // starts with numbers + "aBCDEFG", // with capitals + }, + }, { + name: "bad input", + expect: []string{ + "name must consist of alphanumeric characters, '-', '_', ':' or '.' (e.g. 
'MyName', or 'my.name', or 'abc-123', regex used for validation is '^[a-zA-Z0-9:\\-\\_\\.]*$')", + }, + input: []string{ + "hello world", + "hello!", + "hello~", + "hello ", + "hello*", + "hello+", + "hello=", + "hello%", + "hello/world", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, input := range tt.input { + output := validation.IsValidGrafanaName(input) + require.Equal(t, tt.expect, output, "input: %s", input) + } + }) + } + }) + + t.Run("namespace", func(t *testing.T) { + tests := []struct { + name string + input []string // variations that produce the same output + expect []string + }{{ + name: "empty is OK", + input: []string{""}, + }, { + name: "too long", + input: []string{strings.Repeat("0", 41)}, + expect: []string{"namespace is too long"}, + }, { + name: "too short", + expect: []string{"namespace is too short"}, + input: []string{"a", "1", "aa"}, + }, { + name: "ok", + input: []string{ + "hello", + strings.Repeat("a", 40), // long... alpha + "hello-world", + "hello.world", + "hello_world", + "default", + "stacks-123456", // ends with a number + "org-3", // ends with a number + "1234", // just a numbers + "aaa", + }, + }, { + name: "bad input", + expect: []string{ + "namespace must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or 'abc-123', regex used for validation is '^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$')", + }, + input: []string{ + "_bad_input", // starts with non-alpha + "hello world", + "hello!", + "hello~", + "hello ", + "hello*", + "hello+", + "hello=", + "hello%", + "hello/world", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, input := range tt.input { + output := validation.IsValidNamespace(input) + require.Equal(t, tt.expect, output, "input: %s", input) + } + }) + } + }) + + t.Run("group", func(t *testing.T) { + tests := []struct { + name string + input []string // variations that produce the same output + expect []string + }{{ + name: "too long", + expect: []string{"group is too long"}, + input: []string{strings.Repeat("0", 61)}, + }, { + name: "too short", + expect: []string{"group is too short"}, + input: []string{"a", "1", "aa"}, + }, { + name: "ok", + input: []string{ + "hello", + strings.Repeat("a", 60), // long... alpha + "dashboards.grafana.app", + "prometheus-datasource", + "1234", // just a numbers + "aaa", + }, + }, { + name: "bad input", + expect: []string{ + "group must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'dashboards.grafana.app', or 'grafana-loki-datasource', regex used for validation is '^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$')", + }, + input: []string{ + "_bad_input", // starts with non-alpha + "hello world", + "hello!", + "hello~", + "hello ", + "hello*", + "hello+", + "hello=", + "hello%", + "hello/world", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, input := range tt.input { + output := validation.IsValidGroup(input) + require.Equal(t, tt.expect, output, "input: %s", input) + } + }) + } + }) + + t.Run("resource", func(t *testing.T) { + tests := []struct { + name string + input []string // variations that produce the same output + expect []string + }{{ + name: "too long", + expect: []string{"resource is too long"}, + input: []string{strings.Repeat("0", 41)}, + }, { + name: "too short", + expect: []string{"resource is too short"}, + input: []string{"a", "1", "aa"}, + }, { + name: "ok", + input: []string{ + "hello", + strings.Repeat("a", 40), // long... alpha + "dashboards", + "folders", + "folders123", + "aaa", + "hello-world", + "hello-world-", + }, + }, { + name: "bad input", + expect: []string{ + "resource must consist of alphanumeric characters and dashes, and must start with an alphabetic character (e.g. 
'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9-]*$')", + }, + input: []string{ + "_bad_input", + "hello world", + "hello!", + "hello~", + "hello ", + "hello*", + "hello+", + "hello=", + "hello%", + "hello/world", + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for _, input := range tt.input { + output := validation.IsValidateResource(input) + require.Equal(t, tt.expect, output, "input: %s", input) + } + }) + } + }) +} diff --git a/pkg/apis/featuretoggle/v0alpha1/doc.go b/pkg/apis/featuretoggle/v0alpha1/doc.go deleted file mode 100644 index 222ebfd88b5..00000000000 --- a/pkg/apis/featuretoggle/v0alpha1/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta -// +groupName=featuretoggle.grafana.app - -package v0alpha1 // import "github.com/grafana/grafana/pkg/apis/featuretoggle/v0alpha1" diff --git a/pkg/apis/featuretoggle/v0alpha1/register.go b/pkg/apis/featuretoggle/v0alpha1/register.go deleted file mode 100644 index efcad19821b..00000000000 --- a/pkg/apis/featuretoggle/v0alpha1/register.go +++ /dev/null @@ -1,54 +0,0 @@ -package v0alpha1 - -import ( - "fmt" - - "github.com/grafana/grafana/pkg/apimachinery/utils" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - GROUP = "featuretoggle.grafana.app" - VERSION = "v0alpha1" - APIVERSION = GROUP + "/" + VERSION -) - -// FeatureResourceInfo represents each feature that may have a toggle -var FeatureResourceInfo = utils.NewResourceInfo(GROUP, VERSION, - "features", "feature", "Feature", - func() runtime.Object { return &Feature{} }, - func() runtime.Object { return &FeatureList{} }, - utils.TableColumns{ - Definition: []metav1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name"}, - {Name: "Stage", Type: "string", Format: "string", Description: "Where is the flag in the dev 
cycle"}, - {Name: "Owner", Type: "string", Format: "string", Description: "Which team owns the feature"}, - }, - Reader: func(obj any) ([]interface{}, error) { - r, ok := obj.(*Feature) - if ok { - return []interface{}{ - r.Name, - r.Spec.Stage, - r.Spec.Owner, - }, nil - } - return nil, fmt.Errorf("expected resource or info") - }, - }, -) - -// TogglesResourceInfo represents the actual configuration -var TogglesResourceInfo = utils.NewResourceInfo(GROUP, VERSION, - "featuretoggles", "featuretoggle", "FeatureToggles", - func() runtime.Object { return &FeatureToggles{} }, - func() runtime.Object { return &FeatureTogglesList{} }, - utils.TableColumns{}, // default table converter -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: GROUP, Version: VERSION} -) diff --git a/pkg/apis/featuretoggle/v0alpha1/zz_generated.deepcopy.go b/pkg/apis/featuretoggle/v0alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 6b06fab2b6d..00000000000 --- a/pkg/apis/featuretoggle/v0alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,215 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// SPDX-License-Identifier: AGPL-3.0-only - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v0alpha1 - -import ( - commonv0alpha1 "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Feature) DeepCopyInto(out *Feature) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Feature. 
-func (in *Feature) DeepCopy() *Feature { - if in == nil { - return nil - } - out := new(Feature) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Feature) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureList) DeepCopyInto(out *FeatureList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Feature, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureList. -func (in *FeatureList) DeepCopy() *FeatureList { - if in == nil { - return nil - } - out := new(FeatureList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeatureList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureSpec) DeepCopyInto(out *FeatureSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureSpec. -func (in *FeatureSpec) DeepCopy() *FeatureSpec { - if in == nil { - return nil - } - out := new(FeatureSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FeatureToggles) DeepCopyInto(out *FeatureToggles) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Spec != nil { - in, out := &in.Spec, &out.Spec - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureToggles. -func (in *FeatureToggles) DeepCopy() *FeatureToggles { - if in == nil { - return nil - } - out := new(FeatureToggles) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeatureToggles) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureTogglesList) DeepCopyInto(out *FeatureTogglesList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FeatureToggles, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTogglesList. -func (in *FeatureTogglesList) DeepCopy() *FeatureTogglesList { - if in == nil { - return nil - } - out := new(FeatureTogglesList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeatureTogglesList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResolvedToggleState) DeepCopyInto(out *ResolvedToggleState) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Toggles != nil { - in, out := &in.Toggles, &out.Toggles - *out = make([]ToggleStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolvedToggleState. -func (in *ResolvedToggleState) DeepCopy() *ResolvedToggleState { - if in == nil { - return nil - } - out := new(ResolvedToggleState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResolvedToggleState) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ToggleStatus) DeepCopyInto(out *ToggleStatus) { - *out = *in - if in.Source != nil { - in, out := &in.Source, &out.Source - *out = new(commonv0alpha1.ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToggleStatus. 
-func (in *ToggleStatus) DeepCopy() *ToggleStatus { - if in == nil { - return nil - } - out := new(ToggleStatus) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/featuretoggle/v0alpha1/zz_generated.defaults.go b/pkg/apis/featuretoggle/v0alpha1/zz_generated.defaults.go deleted file mode 100644 index 238fc2f4edc..00000000000 --- a/pkg/apis/featuretoggle/v0alpha1/zz_generated.defaults.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// SPDX-License-Identifier: AGPL-3.0-only - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v0alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/pkg/apis/featuretoggle/v0alpha1/zz_generated.openapi_violation_exceptions.list b/pkg/apis/featuretoggle/v0alpha1/zz_generated.openapi_violation_exceptions.list deleted file mode 100644 index 2a3961b3eee..00000000000 --- a/pkg/apis/featuretoggle/v0alpha1/zz_generated.openapi_violation_exceptions.list +++ /dev/null @@ -1,3 +0,0 @@ -API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/featuretoggle/v0alpha1,ResolvedToggleState,Toggles -API rule violation: names_match,github.com/grafana/grafana/pkg/apis/featuretoggle/v0alpha1,FeatureSpec,FrontendOnly -API rule violation: names_match,github.com/grafana/grafana/pkg/apis/featuretoggle/v0alpha1,FeatureSpec,Owner diff --git a/pkg/apiserver/go.mod b/pkg/apiserver/go.mod index 46e76c69488..7dc7e7634c4 100644 --- a/pkg/apiserver/go.mod +++ b/pkg/apiserver/go.mod @@ -4,7 +4,7 @@ go 1.24.6 require ( github.com/google/go-cmp v0.7.0 - github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 + github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 
github.com/grafana/grafana-app-sdk/logging v0.45.0 github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250514132646-acbc7b54ed9e github.com/prometheus/client_golang v1.23.2 @@ -44,7 +44,7 @@ require ( github.com/google/gnostic-models v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c // indirect + github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect @@ -95,7 +95,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/apiserver/go.sum b/pkg/apiserver/go.sum index 44e5ed38870..560e5526361 100644 --- a/pkg/apiserver/go.sum +++ b/pkg/apiserver/go.sum @@ -63,10 +63,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c h1:8GIMe1KclDdfogaeRsiU69Ev2zTF9kmjqjQqqZMzerc= -github.com/grafana/authlib v0.0.0-20250924100039-ea07223cdb6c/go.mod 
h1:C6CmTG6vfiqebjJswKsc6zes+1F/OtTCi6aAtL5Um6A= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781 h1:jymmOFIWnW26DeUjFgYEoltI170KeT5r1rI8a/dUf0E= -github.com/grafana/authlib/types v0.0.0-20250917093142-83a502239781/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= +github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA= +github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/grafana-app-sdk/logging v0.45.0 h1:0SH6nYZpiLBZRwUq4J6+1vo8xuHKJjnO95/2pGOoA8w= @@ -265,8 +265,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go. 
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/build/cmd.go b/pkg/build/cmd.go index 1bad7828946..c3b9ec0bc50 100644 --- a/pkg/build/cmd.go +++ b/pkg/build/cmd.go @@ -18,6 +18,7 @@ import ( const ( GoOSWindows = "windows" GoOSLinux = "linux" + GoOSDarwin = "darwin" BackendBinary = "grafana" ServerBinary = "grafana-server" @@ -288,8 +289,9 @@ func setBuildEnv(opts BuildOpts) error { } } - if opts.goarch != "amd64" || opts.goos != GoOSLinux { - // needed for all other archs + if (opts.goos != GoOSLinux || opts.goarch != "amd64") && + opts.goos != GoOSDarwin { + // needed for archs other than linux/amd64 and darwin/arm64 + darwin/amd64 opts.cgo = true } @@ -307,10 +309,12 @@ func setBuildEnv(opts BuildOpts) error { } } + cgoEnabled := "0" if opts.cgo { - if err := os.Setenv("CGO_ENABLED", "1"); err != nil { - return err - } + cgoEnabled = "1" + } + if err := os.Setenv("CGO_ENABLED", cgoEnabled); err != nil { + return err } if opts.gocc == "" { diff --git a/pkg/build/go.mod b/pkg/build/go.mod index 47b17e88e05..66435b14493 100644 --- a/pkg/build/go.mod +++ b/pkg/build/go.mod @@ -17,7 +17,7 @@ 
require ( golang.org/x/sync v0.17.0 // @grafana/alerting-backend golang.org/x/text v0.29.0 // indirect; @grafana/grafana-backend-group google.golang.org/grpc v1.75.1 // indirect; @grafana/plugins-platform-backend - google.golang.org/protobuf v1.36.8 // indirect; @grafana/plugins-platform-backend + google.golang.org/protobuf v1.36.9 // indirect; @grafana/plugins-platform-backend ) require ( diff --git a/pkg/build/go.sum b/pkg/build/go.sum index d27999d2748..646c193b2c7 100644 --- a/pkg/build/go.sum +++ b/pkg/build/go.sum @@ -111,8 +111,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/cmd/grafana-server/commands/cli.go b/pkg/cmd/grafana-server/commands/cli.go index 8850132105f..f13b3bada3e 100644 --- a/pkg/cmd/grafana-server/commands/cli.go +++ b/pkg/cmd/grafana-server/commands/cli.go @@ -11,9 +11,7 @@ import ( "syscall" "time" - "github.com/grafana/grafana/pkg/services/featuremgmt" _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" - "github.com/urfave/cli/v2" 
"github.com/grafana/grafana/pkg/api" @@ -21,8 +19,10 @@ import ( "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/metrics" "github.com/grafana/grafana/pkg/infra/process" + "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/server" "github.com/grafana/grafana/pkg/services/apiserver/standalone" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/setting" ) @@ -111,6 +111,11 @@ func RunServer(opts standalone.BuildInfo, cli *cli.Context) error { return err } + // Initialize tracing early to ensure it's always available for other services + if err := tracing.InitTracing(cfg); err != nil { + return err + } + s, err := server.Initialize( cli.Context, cfg, diff --git a/pkg/expr/convert_to_full_long.go b/pkg/expr/convert_to_full_long.go index 6c17badb1d8..dd626ca6421 100644 --- a/pkg/expr/convert_to_full_long.go +++ b/pkg/expr/convert_to_full_long.go @@ -14,8 +14,8 @@ const ( SQLDisplayFieldName = "__display_name__" // These are not types in the SDK or dataplane contract yet. 
- numericFullLongType = "numeric_full_long" - timeseriesFullLongType = "time_series_full_long" + numericFullLongType = "numeric-full-long" + timeseriesFullLongType = "timeseries-full-long" ) func ConvertToFullLong(frames data.Frames) (data.Frames, error) { diff --git a/pkg/infra/tracing/tracing.go b/pkg/infra/tracing/tracing.go index f8b340d12df..c015569cf8a 100644 --- a/pkg/infra/tracing/tracing.go +++ b/pkg/infra/tracing/tracing.go @@ -11,6 +11,8 @@ import ( "sync" "time" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/services" jaegerpropagator "go.opentelemetry.io/contrib/propagators/jaeger" "go.opentelemetry.io/contrib/samplers/jaegerremote" "go.opentelemetry.io/otel" @@ -27,11 +29,9 @@ import ( "go.opentelemetry.io/otel/trace/noop" "google.golang.org/grpc/credentials" - "github.com/go-kit/log/level" - - "github.com/grafana/dskit/services" "github.com/grafana/grafana/pkg/apimachinery/errutil" "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/setting" ) const ( @@ -105,6 +105,23 @@ func ProvideService(tracingCfg *TracingConfig) (*TracingService, error) { return ots, nil } +// InitTracing initializes the tracing service with the provided configuration. +// Used to initialize tracing early to ensure it's always available for other +// services, outside of the wire context. 
+func InitTracing(cfg *setting.Cfg) error { + tracingCfg, err := ParseTracingConfig(cfg) + if err != nil { + return fmt.Errorf("parse tracing config: %w", err) + } + + _, err = ProvideService(tracingCfg) + if err != nil { + return fmt.Errorf("initialize tracing: %w", err) + } + + return nil +} + func NewNoopTracerService() *TracingService { tp := &noopTracerProvider{TracerProvider: noop.NewTracerProvider()} otel.SetTracerProvider(tp) diff --git a/pkg/operators/provisioning/repo_operator.go b/pkg/operators/provisioning/repo_operator.go index 0ec243bb41e..f96653a1d87 100644 --- a/pkg/operators/provisioning/repo_operator.go +++ b/pkg/operators/provisioning/repo_operator.go @@ -11,9 +11,11 @@ import ( "github.com/grafana/grafana-app-sdk/logging" appcontroller "github.com/grafana/grafana/apps/provisioning/pkg/controller" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" "github.com/prometheus/client_golang/prometheus" "k8s.io/client-go/tools/cache" + "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/registry/apis/provisioning/controller" "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" @@ -66,8 +68,14 @@ func RunRepoController(deps server.OperatorDependencies) error { if err != nil { return fmt.Errorf("create API client job store: %w", err) } + + allowedTargets := []v0alpha1.SyncTargetType{} + for _, target := range controllerCfg.allowedTargets { + allowedTargets = append(allowedTargets, v0alpha1.SyncTargetType(target)) + } + validator := repository.NewValidator(controllerCfg.minSyncInterval, allowedTargets, controllerCfg.allowImageRendering) statusPatcher := appcontroller.NewRepositoryStatusPatcher(controllerCfg.provisioningClient.ProvisioningV0alpha1()) - healthChecker := controller.NewHealthChecker(statusPatcher, deps.Registerer) + healthChecker := controller.NewHealthChecker(statusPatcher, deps.Registerer, 
repository.NewSimpleRepositoryTester(validator)) repoInformer := informerFactory.Provisioning().V0alpha1().Repositories() controller, err := controller.NewRepositoryController( @@ -98,7 +106,10 @@ func RunRepoController(deps server.OperatorDependencies) error { type repoControllerConfig struct { provisioningControllerConfig - workerCount int + workerCount int + allowedTargets []string + allowImageRendering bool + minSyncInterval time.Duration } func getRepoControllerConfig(cfg *setting.Cfg, registry prometheus.Registerer) (*repoControllerConfig, error) { @@ -106,8 +117,18 @@ func getRepoControllerConfig(cfg *setting.Cfg, registry prometheus.Registerer) ( if err != nil { return nil, err } + + allowedTargets := []string{} + cfg.SectionWithEnvOverrides("provisioning").Key("allowed_targets").Strings("|") + if len(allowedTargets) == 0 { + allowedTargets = []string{"folder"} + } + return &repoControllerConfig{ provisioningControllerConfig: *controllerCfg, + allowedTargets: allowedTargets, workerCount: cfg.SectionWithEnvOverrides("operator").Key("worker_count").MustInt(1), + allowImageRendering: cfg.SectionWithEnvOverrides("provisioning").Key("allow_image_rendering").MustBool(false), + minSyncInterval: cfg.SectionWithEnvOverrides("provisioning").Key("min_sync_interval").MustDuration(1 * time.Minute), }, nil } diff --git a/pkg/plugins/manager/installer.go b/pkg/plugins/manager/installer.go index b0e96515807..894f939fd44 100644 --- a/pkg/plugins/manager/installer.go +++ b/pkg/plugins/manager/installer.go @@ -71,7 +71,7 @@ func (m *PluginInstaller) Add(ctx context.Context, pluginID, version string, opt for _, dep := range archive.Dependencies { m.log.Info(fmt.Sprintf("Fetching %s dependency %s...", pluginID, dep.ID)) - err = m.Add(ctx, dep.ID, "", opts) + err = m.Add(ctx, dep.ID, "", plugins.NewAddOpts(opts.GrafanaVersion(), opts.OS(), opts.Arch(), "")) if err != nil { var dupeErr plugins.DuplicateError if errors.As(err, &dupeErr) { diff --git 
a/pkg/plugins/manager/installer_test.go b/pkg/plugins/manager/installer_test.go index 6a7962ca5a9..61a9f5b245d 100644 --- a/pkg/plugins/manager/installer_test.go +++ b/pkg/plugins/manager/installer_test.go @@ -392,6 +392,73 @@ func TestPluginManager_Add_Remove(t *testing.T) { require.NoError(t, err) require.Equal(t, []string{"test-plugin.zip"}, loadedPaths) }) + + t.Run("Dependencies don't inherit parent plugin's URL during installation", func(t *testing.T) { + const ( + parentPluginID = "parent-plugin" + depPluginID = "dependency-plugin" + parentURL = "https://example.com/parent-plugin.zip" + ) + + var loadedPaths []string + loader := &fakes.FakeLoader{ + LoadFunc: func(ctx context.Context, src plugins.PluginSource) ([]*plugins.Plugin, error) { + // Check if this is a LocalSource and get its paths + if localSrc, ok := src.(*sources.LocalSource); ok { + loadedPaths = append(loadedPaths, localSrc.Paths()...) + } + return []*plugins.Plugin{}, nil + }, + } + + // Track which methods are called to ensure dependencies use catalog, not URL + urlMethodCalled := false + catalogMethodCalled := false + + pluginRepo := &fakes.FakePluginRepo{ + GetPluginArchiveByURLFunc: func(_ context.Context, url string, _ repo.CompatOpts) (*repo.PluginArchive, error) { + urlMethodCalled = true + require.Equal(t, parentURL, url, "URL method should only be called for parent plugin") + return &repo.PluginArchive{File: &zip.ReadCloser{Reader: zip.Reader{File: []*zip.File{{ + FileHeader: zip.FileHeader{Name: "parent-plugin.zip"}, + }}}}}, nil + }, + GetPluginArchiveFunc: func(_ context.Context, id, version string, _ repo.CompatOpts) (*repo.PluginArchive, error) { + catalogMethodCalled = true + require.Equal(t, depPluginID, id, "Catalog method should only be called for dependency plugin") + return &repo.PluginArchive{File: &zip.ReadCloser{Reader: zip.Reader{File: []*zip.File{{ + FileHeader: zip.FileHeader{Name: "dependency-plugin.zip"}, + }}}}}, nil + }, + } + + fs := &fakes.FakePluginStorage{ + 
ExtractFunc: func(_ context.Context, id string, _ storage.DirNameGeneratorFunc, z *zip.ReadCloser) (*storage.ExtractedPluginArchive, error) { + switch id { + case parentPluginID: + return &storage.ExtractedPluginArchive{ + ID: parentPluginID, + Dependencies: []*storage.Dependency{{ID: depPluginID}}, + Path: "parent-plugin.zip", + }, nil + case depPluginID: + return &storage.ExtractedPluginArchive{ + ID: depPluginID, + Path: "dependency-plugin.zip", + }, nil + default: + return nil, fmt.Errorf("unknown plugin %s", id) + } + }, + } + + inst := New(&config.PluginManagementCfg{}, fakes.NewFakePluginRegistry(), loader, pluginRepo, fs, storage.SimpleDirNameGeneratorFunc, &fakes.FakeAuthService{}) + err := inst.Add(context.Background(), parentPluginID, "", plugins.NewAddOpts("10.0.0", runtime.GOOS, runtime.GOARCH, parentURL)) + require.NoError(t, err) + require.Equal(t, []string{"dependency-plugin.zip", "parent-plugin.zip"}, loadedPaths) + require.True(t, urlMethodCalled) + require.True(t, catalogMethodCalled) + }) } func createPlugin(t *testing.T, pluginID string, class plugins.Class, managed, backend bool, cbs ...func(*plugins.Plugin)) *plugins.Plugin { diff --git a/pkg/promlib/go.mod b/pkg/promlib/go.mod index c755aca1d68..b9ef0057991 100644 --- a/pkg/promlib/go.mod +++ b/pkg/promlib/go.mod @@ -12,7 +12,7 @@ require ( github.com/stretchr/testify v1.11.1 go.opentelemetry.io/otel v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 - google.golang.org/protobuf v1.36.8 + google.golang.org/protobuf v1.36.9 k8s.io/apimachinery v0.34.1 ) diff --git a/pkg/promlib/go.sum b/pkg/promlib/go.sum index 3de5e711667..bd47fe31699 100644 --- a/pkg/promlib/go.sum +++ b/pkg/promlib/go.sum @@ -420,8 +420,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og= google.golang.org/grpc v1.75.1 
h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/promlib/models/query.go b/pkg/promlib/models/query.go index ae56d929ec2..ec32b7bc3cd 100644 --- a/pkg/promlib/models/query.go +++ b/pkg/promlib/models/query.go @@ -1,6 +1,7 @@ package models import ( + "context" "embed" "encoding/json" "fmt" @@ -15,6 +16,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + glog "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana/pkg/promlib/intervalv2" ) @@ -190,7 +192,7 @@ type internalQueryModel struct { Interval string `json:"interval,omitempty"` } -func Parse(span trace.Span, query backend.DataQuery, dsScrapeInterval string, intervalCalculator intervalv2.Calculator, fromAlert bool, enableScope bool) (*Query, error) { +func Parse(ctx context.Context, log glog.Logger, span trace.Span, query backend.DataQuery, dsScrapeInterval string, intervalCalculator intervalv2.Calculator, fromAlert bool, enableScope bool) (*Query, error) { model := &internalQueryModel{} if err := json.Unmarshal(query.JSON, model); err != nil { return nil, err @@ -241,6 +243,7 @@ func Parse(span trace.Span, query backend.DataQuery, dsScrapeInterval string, in } if len(scopeFilters) > 0 || 
len(model.AdhocFilters) > 0 || len(model.GroupByKeys) > 0 { + log.Info("Applying scope filters", "scopeFiltersCount", len(scopeFilters), "adhocFiltersCount", len(model.AdhocFilters), "groupByKeysCount", len(model.GroupByKeys)) expr, err = ApplyFiltersAndGroupBy(expr, scopeFilters, model.AdhocFilters, model.GroupByKeys) if err != nil { return nil, err diff --git a/pkg/promlib/models/query_test.go b/pkg/promlib/models/query_test.go index 8826e0c37c5..cc2b2637442 100644 --- a/pkg/promlib/models/query_test.go +++ b/pkg/promlib/models/query_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" + "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana/pkg/promlib/intervalv2" "github.com/grafana/grafana/pkg/promlib/models" ) @@ -44,7 +45,7 @@ func TestParse(t *testing.T) { RefID: "A", } - res, err := models.Parse(span, q, "15s", intervalCalculator, true, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, true, false) require.NoError(t, err) require.Equal(t, false, res.ExemplarQuery) }) @@ -61,7 +62,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Second*30, res.Step) }) @@ -79,7 +80,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Second*15, res.Step) }) @@ -97,7 +98,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", 
intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Minute*20, res.Step) }) @@ -115,7 +116,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Minute*2, res.Step) }) @@ -133,7 +134,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "240s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "240s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, time.Minute*4, res.Step) }) @@ -152,7 +153,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [2m]})", res.Expr) require.Equal(t, 120*time.Second, res.Step) @@ -173,7 +174,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [2m]})", res.Expr) }) @@ -192,7 +193,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), 
log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [120000]})", res.Expr) }) @@ -211,7 +212,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [120000]}) + rate(ALERTS{job=\"test\" [2m]})", res.Expr) }) @@ -230,7 +231,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [120000]}) + rate(ALERTS{job=\"test\" [2m]})", res.Expr) }) @@ -248,7 +249,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800s]})", res.Expr) }) @@ -266,7 +267,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800]})", res.Expr) }) @@ -284,7 +285,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := 
models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800s]})", res.Expr) }) @@ -302,7 +303,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [0]})", res.Expr) }) @@ -320,7 +321,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [1]})", res.Expr) }) @@ -338,7 +339,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [172800000]})", res.Expr) }) @@ -356,7 +357,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [20]})", res.Expr) }) @@ -375,7 +376,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", 
intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [20m0s]})", res.Expr) }) @@ -394,7 +395,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 1*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [1m0s]})", res.Expr) require.Equal(t, 1*time.Minute, res.Step) @@ -413,7 +414,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [135000]})", res.Expr) }) @@ -431,7 +432,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "rate(ALERTS{job=\"test\" [135000]}) + rate(ALERTS{job=\"test\" [2m15s]})", res.Expr) }) @@ -450,7 +451,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "A", res.RefId) }) @@ -468,7 +469,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, 2*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, 
"rate(ALERTS{job=\"test\" [135000]}) + rate(ALERTS{job=\"test\" [2m15s]})", res.Expr) }) @@ -487,7 +488,7 @@ func TestParse(t *testing.T) { "range": true }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, true, res.RangeQuery) }) @@ -507,7 +508,7 @@ func TestParse(t *testing.T) { "instant": true }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, true, res.RangeQuery) require.Equal(t, true, res.InstantQuery) @@ -526,7 +527,7 @@ func TestParse(t *testing.T) { "refId": "A" }`, timeRange, time.Duration(1)*time.Minute) - res, err := models.Parse(span, q, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, true, res.RangeQuery) }) @@ -659,7 +660,7 @@ func TestRateInterval(t *testing.T) { t.Run(tt.name, func(t *testing.T) { q := mockQuery(tt.args.expr, tt.args.interval, tt.args.intervalMs, tt.args.timeRange) q.MaxDataPoints = 12384 - res, err := models.Parse(span, q, tt.args.dsScrapeInterval, intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, q, tt.args.dsScrapeInterval, intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, tt.want.Expr, res.Expr) require.Equal(t, tt.want.Step, res.Step) @@ -694,7 +695,7 @@ func TestRateInterval(t *testing.T) { "utcOffsetSec":3600 }`), } - res, err := models.Parse(span, query, "30s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, query, "30s", 
intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "sum(rate(process_cpu_seconds_total[2m0s]))", res.Expr) require.Equal(t, 30*time.Second, res.Step) @@ -729,7 +730,7 @@ func TestRateInterval(t *testing.T) { "maxDataPoints": 1055 }`), } - res, err := models.Parse(span, query, "15s", intervalCalculator, false, false) + res, err := models.Parse(context.Background(), log.New(), span, query, "15s", intervalCalculator, false, false) require.NoError(t, err) require.Equal(t, "sum(rate(cache_requests_total[1m0s]))", res.Expr) require.Equal(t, 15*time.Second, res.Step) diff --git a/pkg/promlib/querydata/request.go b/pkg/promlib/querydata/request.go index 5f3f9556648..ff3cefc4458 100644 --- a/pkg/promlib/querydata/request.go +++ b/pkg/promlib/querydata/request.go @@ -129,7 +129,7 @@ func (s *QueryData) handleQuery(ctx context.Context, bq backend.DataQuery, fromA hasPromQLScopeFeatureFlag bool) *backend.DataResponse { traceCtx, span := s.tracer.Start(ctx, "datasource.prometheus") defer span.End() - query, err := models.Parse(span, bq, s.TimeInterval, s.intervalCalculator, fromAlert, hasPromQLScopeFeatureFlag) + query, err := models.Parse(ctx, s.log, span, bq, s.TimeInterval, s.intervalCalculator, fromAlert, hasPromQLScopeFeatureFlag) if err != nil { return &backend.DataResponse{ Error: err, @@ -145,7 +145,7 @@ func (s *QueryData) handleQuery(ctx context.Context, bq backend.DataQuery, fromA func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *models.Query) *backend.DataResponse { logger := s.log.FromContext(traceCtx) - logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr /*, "queryTimeout", s.QueryTimeout*/) + logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr) dr := &backend.DataResponse{ Frames: data.Frames{}, diff --git a/pkg/registry/apis/dashboard/legacy/token.go b/pkg/registry/apis/dashboard/legacy/token.go index a3bc0e5fb90..a5de881550c 
100644 --- a/pkg/registry/apis/dashboard/legacy/token.go +++ b/pkg/registry/apis/dashboard/legacy/token.go @@ -44,7 +44,9 @@ func readContinueToken(next string) (continueToken, error) { if sub[0] != "folder" { return token, fmt.Errorf("expected folder UID in third slug") } - token.folder = sub[1] + if len(sub) > 1 { + token.folder = sub[1] + } // // Check if the folder filter is the same from the previous query // if q.Requirements.Folder == nil { @@ -59,6 +61,5 @@ func readContinueToken(next string) (continueToken, error) { } func (r *continueToken) String() string { - return fmt.Sprintf("org:%d/start:%d/folder:%s", - r.orgId, r.id, r.folder) + return fmt.Sprintf("org:%d/start:%d/folder:%s", r.orgId, r.id, r.folder) } diff --git a/pkg/registry/apis/dashboard/legacy/token_test.go b/pkg/registry/apis/dashboard/legacy/token_test.go new file mode 100644 index 00000000000..e8bdf9d5402 --- /dev/null +++ b/pkg/registry/apis/dashboard/legacy/token_test.go @@ -0,0 +1,118 @@ +package legacy + +import ( + "testing" +) + +func TestReadContinueToken(t *testing.T) { + tests := []struct { + name string + input string + wantToken continueToken + wantErr bool + }{ + { + name: "empty token", + input: "", + wantToken: continueToken{ + orgId: 0, + id: 0, + folder: "", + }, + wantErr: false, + }, + { + name: "too few parts", + input: "org:1/start:2", + wantErr: true, + }, + { + name: "invalid org slug", + input: "foo:1/start:2/folder:abc", + wantErr: true, + }, + { + name: "invalid org id", + input: "org:abc/start:2/folder:abc", + wantErr: true, + }, + { + name: "invalid start slug", + input: "org:1/foo:2/folder:abc", + wantErr: true, + }, + { + name: "invalid start id", + input: "org:1/start:abc/folder:abc", + wantErr: true, + }, + { + name: "invalid folder slug", + input: "org:1/start:2/foo:abc", + wantErr: true, + }, + { + name: "valid token", + input: "org:42/start:100/folder:my-folder", + wantToken: continueToken{ + orgId: 42, + id: 100, + folder: "my-folder", + }, + wantErr: 
false, + }, + { + name: "valid token with empty folder", + input: "org:42/start:100/folder:", + wantToken: continueToken{ + orgId: 42, + id: 100, + folder: "", + }, + wantErr: false, + }, + { + name: "folder without value", + input: "org:42/start:100/folder", // missing trailing ":" + wantToken: continueToken{ + orgId: 42, + id: 100, + folder: "", + }, + wantErr: false, + }, + { + name: "missing folder", + input: "org:42/start:100", + wantToken: continueToken{ + orgId: 42, + id: 100, + folder: "", + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + token, err := readContinueToken(tt.input) + if (err != nil) != tt.wantErr { + t.Errorf("readContinueToken() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr { + if token.orgId != tt.wantToken.orgId || token.id != tt.wantToken.id || token.folder != tt.wantToken.folder { + t.Errorf("readContinueToken() got = %+v, want %+v", token, tt.wantToken) + } + } + }) + } +} + +func TestContinueToken_String(t *testing.T) { + token := continueToken{orgId: 5, id: 10, folder: "abc"} + want := "org:5/start:10/folder:abc" + if got := token.String(); got != want { + t.Errorf("continueToken.String() = %q, want %q", got, want) + } +} diff --git a/pkg/registry/apis/dashboard/register.go b/pkg/registry/apis/dashboard/register.go index 21ce04ec92d..a8e5de42e39 100644 --- a/pkg/registry/apis/dashboard/register.go +++ b/pkg/registry/apis/dashboard/register.go @@ -16,6 +16,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/dynamic" "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" @@ -97,7 +98,8 @@ type DashboardsAPIBuilder struct { unified resource.ResourceClient dashboardProvisioningService dashboards.DashboardProvisioningService dashboardPermissions dashboards.PermissionsRegistrationService - 
dashboardPermissionsSvc accesscontrol.DashboardPermissionsService + dashboardPermissionsSvc accesscontrol.DashboardPermissionsService // TODO: once kubernetesAuthzResourcePermissionApis is enabled, rely solely on resourcePermissionsSvc and add integration test afterDelete hook + resourcePermissionsSvc *dynamic.NamespaceableResourceInterface scheme *runtime.Scheme search *SearchHandler dashStore dashboards.Store @@ -171,17 +173,17 @@ func RegisterAPIService( return builder } -func NewAPIService(ac authlib.AccessClient, features featuremgmt.FeatureToggles, folderClientProvider client.K8sHandlerProvider, datasourceProvider schemaversion.DataSourceInfoProvider) *DashboardsAPIBuilder { +func NewAPIService(ac authlib.AccessClient, features featuremgmt.FeatureToggles, folderClientProvider client.K8sHandlerProvider, datasourceProvider schemaversion.DataSourceInfoProvider, resourcePermissionsSvc *dynamic.NamespaceableResourceInterface) *DashboardsAPIBuilder { migration.Initialize(datasourceProvider) return &DashboardsAPIBuilder{ - minRefreshInterval: "10s", - accessClient: ac, - authorizer: authsvc.NewResourceAuthorizer(ac), - features: features, - dashboardService: &dashsvc.DashboardServiceImpl{}, // for validation helpers only - folderClientProvider: folderClientProvider, - - isStandalone: true, + minRefreshInterval: "10s", + accessClient: ac, + authorizer: authsvc.NewResourceAuthorizer(ac), + features: features, + dashboardService: &dashsvc.DashboardServiceImpl{}, // for validation helpers only + folderClientProvider: folderClientProvider, + resourcePermissionsSvc: resourcePermissionsSvc, + isStandalone: true, } } @@ -461,6 +463,7 @@ func (b *DashboardsAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver RequireDeprecatedInternalID: true, } + // TODO: merge this into one option if b.isStandalone { // TODO: Sets default root permissions } else { @@ -563,11 +566,12 @@ func (b *DashboardsAPIBuilder) storageForVersion( 
apiGroupInfo.VersionedResourcesStorageMap[dashboards.GroupVersion().Version] = storage if b.isStandalone { - store, err := grafanaregistry.NewRegistryStore(opts.Scheme, dashboards, opts.OptsGetter) + unified, err := grafanaregistry.NewRegistryStore(opts.Scheme, dashboards, opts.OptsGetter) if err != nil { return err } - storage[dashboards.StoragePath()] = store + unified.AfterDelete = b.afterDelete + storage[dashboards.StoragePath()] = unified return nil } @@ -577,13 +581,14 @@ func (b *DashboardsAPIBuilder) storageForVersion( return err } - store, err := grafanaregistry.NewRegistryStore(opts.Scheme, dashboards, opts.OptsGetter) + unified, err := grafanaregistry.NewRegistryStore(opts.Scheme, dashboards, opts.OptsGetter) if err != nil { return err } + unified.AfterDelete = b.afterDelete gr := dashboards.GroupResource() - dw, err := opts.DualWriteBuilder(gr, legacyStore, store) + dw, err := opts.DualWriteBuilder(gr, legacyStore, unified) if err != nil { return err } @@ -629,6 +634,28 @@ func (b *DashboardsAPIBuilder) storageForVersion( return nil } +func (b *DashboardsAPIBuilder) afterDelete(obj runtime.Object, _ *metav1.DeleteOptions) { + if util.IsInterfaceNil(b.resourcePermissionsSvc) { + return + } + + ctx := context.Background() + log := logging.DefaultLogger + meta, err := utils.MetaAccessor(obj) + if err != nil { + log.Error("Failed to access deleted dashboard object metadata", "error", err) + return + } + + log.Debug("deleting dashboard permissions", "uid", meta.GetName(), "namespace", meta.GetNamespace()) + client := (*b.resourcePermissionsSvc).Namespace(meta.GetNamespace()) + name := fmt.Sprintf("%s-%s-%s", dashv1.DashboardResourceInfo.GroupVersionResource().Group, dashv1.DashboardResourceInfo.GroupVersionResource().Resource, meta.GetName()) + err = client.Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Error("failed to delete dashboard permissions", "error", err) + } +} + func (b *DashboardsAPIBuilder) 
GetOpenAPIDefinitions() common.GetOpenAPIDefinitions { return func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { defs := dashv0.GetOpenAPIDefinitions(ref) diff --git a/pkg/registry/apis/datasource/register.go b/pkg/registry/apis/datasource/register.go index dc31d8cbd9e..8d24e69a03e 100644 --- a/pkg/registry/apis/datasource/register.go +++ b/pkg/registry/apis/datasource/register.go @@ -3,7 +3,9 @@ package datasource import ( "context" "encoding/json" + "errors" "fmt" + "path/filepath" "github.com/prometheus/client_golang/prometheus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,14 +22,16 @@ import ( "github.com/grafana/grafana/pkg/apimachinery/utils" datasource "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1" query "github.com/grafana/grafana/pkg/apis/query/v0alpha1" + "github.com/grafana/grafana/pkg/configprovider" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" + "github.com/grafana/grafana/pkg/plugins/manager/sources" "github.com/grafana/grafana/pkg/promlib/models" "github.com/grafana/grafana/pkg/registry/apis/query/queryschema" "github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/apiserver/builder" "github.com/grafana/grafana/pkg/services/featuremgmt" - "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore" + "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/tsdb/grafana-testdata-datasource/kinds" ) @@ -47,12 +51,12 @@ type DataSourceAPIBuilder struct { } func RegisterAPIService( + cfgProvider configprovider.ConfigProvider, features featuremgmt.FeatureToggles, apiRegistrar builder.APIRegistrar, pluginClient plugins.Client, // access to everything datasources ScopedPluginDatasourceProvider, contextProvider PluginContextWrapper, - pluginStore pluginstore.Store, accessControl accesscontrol.AccessControl, reg prometheus.Registerer, ) (*DataSourceAPIBuilder, error) { @@ -66,25 +70,43 @@ func RegisterAPIService( 
var err error var builder *DataSourceAPIBuilder - all := pluginStore.Plugins(context.Background(), plugins.TypeDataSource) + + cfg, err := cfgProvider.Get(context.Background()) + if err != nil { + return nil, err + } + pluginJSONs, err := getCorePlugins(cfg) + if err != nil { + return nil, err + } + ids := []string{ "grafana-testdata-datasource", "prometheus", "graphite", } - for _, ds := range all { - if explictPluginList && !slices.Contains(ids, ds.ID) { + for _, pluginJSON := range pluginJSONs { + if explictPluginList && !slices.Contains(ids, pluginJSON.ID) { continue // skip this one } - if !ds.Backend { + if !pluginJSON.Backend { continue // skip frontend only plugins } - builder, err = NewDataSourceAPIBuilder(ds.JSONData, - pluginClient, - datasources.GetDatasourceProvider(ds.JSONData), + if pluginJSON.Type != plugins.TypeDataSource { + continue // skip non-datasource plugins + } + + client, ok := pluginClient.(PluginClient) + if !ok { + return nil, fmt.Errorf("plugin client is not a PluginClient: %T", pluginClient) + } + + builder, err = NewDataSourceAPIBuilder(pluginJSON, + client, + datasources.GetDatasourceProvider(pluginJSON), contextProvider, accessControl, features.IsEnabledGlobally(featuremgmt.FlagDatasourceQueryTypes), @@ -277,3 +299,22 @@ func (b *DataSourceAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.Op return oas, err } + +func getCorePlugins(cfg *setting.Cfg) ([]plugins.JSONData, error) { + coreDataSourcesPath := filepath.Join(cfg.StaticRootPath, "app", "plugins", "datasource") + coreDataSourcesSrc := sources.NewLocalSource( + plugins.ClassCore, + []string{coreDataSourcesPath}, + ) + + res, err := coreDataSourcesSrc.Discover(context.Background()) + if err != nil { + return nil, errors.New("failed to load core data source plugins") + } + + pluginJSONs := make([]plugins.JSONData, 0, len(res)) + for _, p := range res { + pluginJSONs = append(pluginJSONs, p.Primary.JSONData) + } + return pluginJSONs, nil +} diff --git 
a/pkg/registry/apis/datasource/sub_query.go b/pkg/registry/apis/datasource/sub_query.go index 51f8b90d430..258447418bc 100644 --- a/pkg/registry/apis/datasource/sub_query.go +++ b/pkg/registry/apis/datasource/sub_query.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1" + "github.com/grafana/grafana/pkg/apimachinery/errutil" query "github.com/grafana/grafana/pkg/apis/query/v0alpha1" query_headers "github.com/grafana/grafana/pkg/registry/apis/query" "github.com/grafana/grafana/pkg/services/datasources" @@ -96,6 +97,22 @@ func (r *subQueryREST) Connect(ctx context.Context, name string, opts runtime.Ob PluginContext: pluginCtx, Headers: query_headers.ExtractKnownHeaders(req.Header), }) + + // all errors get converted into k8 errors when sent in responder.Error and lose important context like downstream info + var e errutil.Error + if errors.As(err, &e) && e.Source == errutil.SourceDownstream { + responder.Object(int(backend.StatusBadRequest), + &query.QueryDataResponse{QueryDataResponse: backend.QueryDataResponse{Responses: map[string]backend.DataResponse{ + "A": { + Error: errors.New(e.LogMessage), + ErrorSource: backend.ErrorSourceDownstream, + Status: backend.StatusBadRequest, + }, + }}}, + ) + return + } + if err != nil { responder.Error(err) return diff --git a/pkg/registry/apis/folders/hooks.go b/pkg/registry/apis/folders/hooks.go index 9adf40c1293..bf8f042769f 100644 --- a/pkg/registry/apis/folders/hooks.go +++ b/pkg/registry/apis/folders/hooks.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic/registry" @@ -94,7 +95,7 @@ func (b *FolderAPIBuilder) afterDelete(obj runtime.Object, _ *metav1.DeleteOptio log.Debug("deleting folder permissions", "uid", meta.GetName(), "namespace", meta.GetNamespace()) 
client := (*b.resourcePermissionsSvc).Namespace(meta.GetNamespace()) err := client.Delete(ctx, fmt.Sprintf("%s-%s-%s", folders.FolderResourceInfo.GroupVersionResource().Group, folders.FolderResourceInfo.GroupVersionResource().Resource, meta.GetName()), metav1.DeleteOptions{}) - if err != nil { + if err != nil && !apierrors.IsNotFound(err) { log.Error("failed to delete folder permissions", "error", err) } return diff --git a/pkg/registry/apis/folders/sub_access.go b/pkg/registry/apis/folders/sub_access.go index ff068a14a33..314d9318444 100644 --- a/pkg/registry/apis/folders/sub_access.go +++ b/pkg/registry/apis/folders/sub_access.go @@ -87,8 +87,7 @@ func (r *subAccessREST) getAccessInfo(ctx context.Context, name string) (*folder Resource: foldersV1.RESOURCE, Namespace: ns.Value, Name: name, - Folder: obj.GetFolder(), - }) + }, obj.GetFolder()) return tmp.Allowed } diff --git a/pkg/registry/apis/folders/validate.go b/pkg/registry/apis/folders/validate.go index fe4fbb75b51..ca99aaa0a30 100644 --- a/pkg/registry/apis/folders/validate.go +++ b/pkg/registry/apis/folders/validate.go @@ -94,6 +94,11 @@ func validateOnUpdate(ctx context.Context, // Validate the move operation newParent := folderObj.GetFolder() + // If we move to root, we don't need to validate the depth. 
+ if newParent == folder.RootFolderUID { + return nil + } + // folder cannot be moved to a k6 folder if newParent == accesscontrol.K6FolderUID { return fmt.Errorf("k6 project may not be moved") @@ -117,7 +122,7 @@ func validateOnUpdate(ctx context.Context, // if by moving a folder we exceed the max depth, return an error if len(info.Items)+1 >= maxDepth { - return folder.ErrMaximumDepthReached + return folder.ErrMaximumDepthReached.Errorf("maximum folder depth reached") } return nil } @@ -141,7 +146,7 @@ func validateOnDelete(ctx context.Context, for _, v := range resp.Stats { if v.Count > 0 { - return folder.ErrFolderNotEmpty + return folder.ErrFolderNotEmpty.Errorf("folder is not empty, contains %d resources", v.Count) } } return nil diff --git a/pkg/registry/apis/iam/legacy/sql.go b/pkg/registry/apis/iam/legacy/sql.go index 3f2156d26a2..58d7c9f1fd3 100644 --- a/pkg/registry/apis/iam/legacy/sql.go +++ b/pkg/registry/apis/iam/legacy/sql.go @@ -31,6 +31,7 @@ type LegacyIdentityStore interface { GetTeamInternalID(ctx context.Context, ns claims.NamespaceInfo, query GetTeamInternalIDQuery) (*GetTeamInternalIDResult, error) CreateTeam(ctx context.Context, ns claims.NamespaceInfo, cmd CreateTeamCommand) (*CreateTeamResult, error) + UpdateTeam(ctx context.Context, ns claims.NamespaceInfo, cmd UpdateTeamCommand) (*UpdateTeamResult, error) ListTeams(ctx context.Context, ns claims.NamespaceInfo, query ListTeamQuery) (*ListTeamResult, error) DeleteTeam(ctx context.Context, ns claims.NamespaceInfo, cmd DeleteTeamCommand) error ListTeamBindings(ctx context.Context, ns claims.NamespaceInfo, query ListTeamBindingsQuery) (*ListTeamBindingsResult, error) diff --git a/pkg/registry/apis/iam/legacy/sql_test.go b/pkg/registry/apis/iam/legacy/sql_test.go index d9187c163b5..7c4fad74ba8 100644 --- a/pkg/registry/apis/iam/legacy/sql_test.go +++ b/pkg/registry/apis/iam/legacy/sql_test.go @@ -60,6 +60,12 @@ func TestIdentityQueries(t *testing.T) { return &v } + updateTeam := func(cmd 
*UpdateTeamCommand) sqltemplate.SQLTemplate { + v := newUpdateTeam(nodb, cmd) + v.SQLTemplate = mocks.NewTestingSQLTemplate() + return &v + } + listTeams := func(q *ListTeamQuery) sqltemplate.SQLTemplate { v := newListTeams(nodb, q) v.SQLTemplate = mocks.NewTestingSQLTemplate() @@ -393,6 +399,19 @@ func TestIdentityQueries(t *testing.T) { }), }, }, + sqlUpdateTeamTemplate: { + { + Name: "update_team_basic", + Data: updateTeam(&UpdateTeamCommand{ + UID: "team-1", + Name: "Team 1", + Email: "team1@example.com", + IsProvisioned: true, + ExternalUID: "team-1-uid", + Updated: NewDBTime(time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC)), + }), + }, + }, sqlDeleteTeamTemplate: { { Name: "delete_team_basic", diff --git a/pkg/registry/apis/iam/legacy/team.go b/pkg/registry/apis/iam/legacy/team.go index a52f7658df9..6f49707d5d0 100644 --- a/pkg/registry/apis/iam/legacy/team.go +++ b/pkg/registry/apis/iam/legacy/team.go @@ -265,6 +265,91 @@ func (s *legacySQLStore) CreateTeam(ctx context.Context, ns claims.NamespaceInfo return &CreateTeamResult{Team: createdTeam}, nil } +type UpdateTeamCommand struct { + UID string + Name string + Updated DBTime + Email string + ExternalID string + IsProvisioned bool + ExternalUID string +} + +type UpdateTeamResult struct { + Team team.Team +} + +var sqlUpdateTeamTemplate = mustTemplate("update_team.sql") + +func newUpdateTeam(sql *legacysql.LegacyDatabaseHelper, cmd *UpdateTeamCommand) updateTeamQuery { + return updateTeamQuery{ + SQLTemplate: sqltemplate.New(sql.DialectForDriver()), + TeamTable: sql.Table("team"), + Command: cmd, + } +} + +type updateTeamQuery struct { + sqltemplate.SQLTemplate + TeamTable string + Command *UpdateTeamCommand +} + +func (r updateTeamQuery) Validate() error { + return nil +} + +func (s *legacySQLStore) UpdateTeam(ctx context.Context, ns claims.NamespaceInfo, cmd UpdateTeamCommand) (*UpdateTeamResult, error) { + now := time.Now().UTC().Truncate(time.Second) + + cmd.Updated = NewDBTime(now) + + sql, err := 
s.sql(ctx) + if err != nil { + return nil, err + } + + req := newUpdateTeam(sql, &cmd) + + var updatedTeam team.Team + err = sql.DB.GetSqlxSession().WithTransaction(ctx, func(st *session.SessionTx) error { + _, err := s.GetTeamInternalID(ctx, ns, GetTeamInternalIDQuery{ + OrgID: ns.OrgID, + UID: cmd.UID, + }) + if err != nil { + return fmt.Errorf("team not found: %w", err) + } + + teamQuery, err := sqltemplate.Execute(sqlUpdateTeamTemplate, req) + if err != nil { + return fmt.Errorf("failed to execute team update template %q: %w", sqlUpdateTeamTemplate.Name(), err) + } + + _, err = st.Exec(ctx, teamQuery, req.GetArgs()...) + if err != nil { + return fmt.Errorf("failed to update team: %w", err) + } + + updatedTeam = team.Team{ + UID: cmd.UID, + Name: cmd.Name, + Email: cmd.Email, + ExternalUID: cmd.ExternalUID, + IsProvisioned: cmd.IsProvisioned, + Updated: cmd.Updated.Time, + } + + return nil + }) + + if err != nil { + return nil, err + } + + return &UpdateTeamResult{Team: updatedTeam}, nil +} + type DeleteTeamCommand struct { UID string } @@ -336,7 +421,7 @@ type ListTeamBindingsQuery struct { } type ListTeamBindingsResult struct { - Bindings []TeamBinding + Bindings []TeamMember Continue int64 RV int64 } @@ -360,11 +445,6 @@ func (m TeamMember) MemberID() string { return claims.NewTypeID(claims.TypeUser, m.UserUID) } -type TeamBinding struct { - TeamUID string - Members []TeamMember -} - var sqlQueryTeamBindingsTemplate = mustTemplate("team_bindings_query.sql") type listTeamBindingsQuery struct { @@ -420,11 +500,11 @@ func (s *legacySQLStore) ListTeamBindings(ctx context.Context, ns claims.Namespa return nil, err } - res := &ListTeamBindingsResult{} - grouped := map[string][]TeamMember{} + res := &ListTeamBindingsResult{ + Bindings: make([]TeamMember, 0, int(query.Pagination.Limit)), + } var lastID int64 - var atTeamLimit bool for rows.Next() { m := TeamMember{} @@ -433,16 +513,11 @@ func (s *legacySQLStore) ListTeamBindings(ctx context.Context, ns claims.Namespa 
return res, err } - lastID = m.TeamID - members, ok := grouped[m.TeamUID] - if ok { - grouped[m.TeamUID] = append(members, m) - } else if !atTeamLimit { - grouped[m.TeamUID] = []TeamMember{m} - } + res.Bindings = append(res.Bindings, m) - if len(grouped) >= int(query.Pagination.Limit)-1 { - atTeamLimit = true + lastID = m.ID + + if len(res.Bindings) >= int(query.Pagination.Limit)-1 { res.Continue = lastID } } @@ -451,14 +526,6 @@ func (s *legacySQLStore) ListTeamBindings(ctx context.Context, ns claims.Namespa res.RV, err = sql.GetResourceVersion(ctx, "team_member", "updated") } - res.Bindings = make([]TeamBinding, 0, len(grouped)) - for uid, members := range grouped { - res.Bindings = append(res.Bindings, TeamBinding{ - TeamUID: uid, - Members: members, - }) - } - return res, err } diff --git a/pkg/registry/apis/iam/legacy/team_bindings_query.sql b/pkg/registry/apis/iam/legacy/team_bindings_query.sql index fb8930c9fde..a64687a7e8f 100644 --- a/pkg/registry/apis/iam/legacy/team_bindings_query.sql +++ b/pkg/registry/apis/iam/legacy/team_bindings_query.sql @@ -3,18 +3,13 @@ FROM {{ .Ident .TeamMemberTable }} tm INNER JOIN {{ .Ident .TeamTable }} t ON tm.team_id = t.id INNER JOIN {{ .Ident .UserTable }} u ON tm.user_id = u.id WHERE -{{ if .Query.UID }} - t.uid = {{ .Arg .Query.UID }} -{{ else }} - t.uid IN( - SELECT uid - FROM {{ .Ident .TeamTable }} t - {{ if .Query.Pagination.Continue }} - WHERE t.id >= {{ .Arg .Query.Pagination.Continue }} - {{ end }} - ORDER BY t.id ASC LIMIT {{ .Arg .Query.Pagination.Limit }} - ) -{{ end }} -AND tm.org_id = {{ .Arg .Query.OrgID}} + tm.org_id = {{ .Arg .Query.OrgID}} + {{ if .Query.UID }} + AND t.uid = {{ .Arg .Query.UID }} + {{ end }} + {{- if .Query.Pagination.Continue }} + AND tm.id >= {{ .Arg .Query.Pagination.Continue }} + {{- end }} AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT {{ .Arg .Query.Pagination.Limit }}; diff --git 
a/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_1_bindings.sql b/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_1_bindings.sql index 4dc43b4f936..1b3c18c20cc 100755 --- a/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_1_bindings.sql +++ b/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_1_bindings.sql @@ -3,7 +3,8 @@ FROM `grafana`.`team_member` tm INNER JOIN `grafana`.`team` t ON tm.team_id = t.id INNER JOIN `grafana`.`user` u ON tm.user_id = u.id WHERE - t.uid = 'team-1' -AND tm.org_id = 1 + tm.org_id = 1 + AND t.uid = 'team-1' AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 1; diff --git a/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_1.sql b/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_1.sql index be3d212eb9e..2180553e3a0 100755 --- a/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_1.sql +++ b/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_1.sql @@ -3,11 +3,7 @@ FROM `grafana`.`team_member` tm INNER JOIN `grafana`.`team` t ON tm.team_id = t.id INNER JOIN `grafana`.`user` u ON tm.user_id = u.id WHERE - t.uid IN( - SELECT uid - FROM `grafana`.`team` t - ORDER BY t.id ASC LIMIT 5 - ) -AND tm.org_id = 1 + tm.org_id = 1 AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 5; diff --git a/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_2.sql b/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_2.sql index ecbef2e0c01..767f1fe4c82 100755 --- a/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_2.sql +++ b/pkg/registry/apis/iam/legacy/testdata/mysql--team_bindings_query-team_bindings_page_2.sql @@ -3,12 +3,8 @@ FROM `grafana`.`team_member` tm INNER JOIN `grafana`.`team` t ON tm.team_id = t.id INNER 
JOIN `grafana`.`user` u ON tm.user_id = u.id WHERE - t.uid IN( - SELECT uid - FROM `grafana`.`team` t - WHERE t.id >= 2 - ORDER BY t.id ASC LIMIT 1 - ) -AND tm.org_id = 1 + tm.org_id = 1 + AND tm.id >= 2 AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 1; diff --git a/pkg/registry/apis/iam/legacy/testdata/mysql--update_team-update_team_basic.sql b/pkg/registry/apis/iam/legacy/testdata/mysql--update_team-update_team_basic.sql new file mode 100755 index 00000000000..3d0c3bf0520 --- /dev/null +++ b/pkg/registry/apis/iam/legacy/testdata/mysql--update_team-update_team_basic.sql @@ -0,0 +1,7 @@ +UPDATE `grafana`.`team` +SET name = 'Team 1', + updated = '2023-01-01 12:00:00', + email = 'team1@example.com', + is_provisioned = TRUE, + external_uid = 'team-1-uid' +WHERE uid = 'team-1' diff --git a/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_1_bindings.sql b/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_1_bindings.sql index 82d10b5fd51..50e968de4a1 100755 --- a/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_1_bindings.sql +++ b/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_1_bindings.sql @@ -3,7 +3,8 @@ FROM "grafana"."team_member" tm INNER JOIN "grafana"."team" t ON tm.team_id = t.id INNER JOIN "grafana"."user" u ON tm.user_id = u.id WHERE - t.uid = 'team-1' -AND tm.org_id = 1 + tm.org_id = 1 + AND t.uid = 'team-1' AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 1; diff --git a/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_1.sql b/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_1.sql index 77e1182cde8..76f9e5a8cca 100755 --- a/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_1.sql +++ b/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_1.sql @@ -3,11 +3,7 @@ FROM 
"grafana"."team_member" tm INNER JOIN "grafana"."team" t ON tm.team_id = t.id INNER JOIN "grafana"."user" u ON tm.user_id = u.id WHERE - t.uid IN( - SELECT uid - FROM "grafana"."team" t - ORDER BY t.id ASC LIMIT 5 - ) -AND tm.org_id = 1 + tm.org_id = 1 AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 5; diff --git a/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_2.sql b/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_2.sql index 2cec052d9da..c6608dd7968 100755 --- a/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_2.sql +++ b/pkg/registry/apis/iam/legacy/testdata/postgres--team_bindings_query-team_bindings_page_2.sql @@ -3,12 +3,8 @@ FROM "grafana"."team_member" tm INNER JOIN "grafana"."team" t ON tm.team_id = t.id INNER JOIN "grafana"."user" u ON tm.user_id = u.id WHERE - t.uid IN( - SELECT uid - FROM "grafana"."team" t - WHERE t.id >= 2 - ORDER BY t.id ASC LIMIT 1 - ) -AND tm.org_id = 1 + tm.org_id = 1 + AND tm.id >= 2 AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 1; diff --git a/pkg/registry/apis/iam/legacy/testdata/postgres--update_team-update_team_basic.sql b/pkg/registry/apis/iam/legacy/testdata/postgres--update_team-update_team_basic.sql new file mode 100755 index 00000000000..7ec26ef6c6a --- /dev/null +++ b/pkg/registry/apis/iam/legacy/testdata/postgres--update_team-update_team_basic.sql @@ -0,0 +1,7 @@ +UPDATE "grafana"."team" +SET name = 'Team 1', + updated = '2023-01-01 12:00:00', + email = 'team1@example.com', + is_provisioned = TRUE, + external_uid = 'team-1-uid' +WHERE uid = 'team-1' diff --git a/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_1_bindings.sql b/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_1_bindings.sql index 82d10b5fd51..50e968de4a1 100755 --- a/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_1_bindings.sql +++ 
b/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_1_bindings.sql @@ -3,7 +3,8 @@ FROM "grafana"."team_member" tm INNER JOIN "grafana"."team" t ON tm.team_id = t.id INNER JOIN "grafana"."user" u ON tm.user_id = u.id WHERE - t.uid = 'team-1' -AND tm.org_id = 1 + tm.org_id = 1 + AND t.uid = 'team-1' AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 1; diff --git a/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_1.sql b/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_1.sql index 77e1182cde8..76f9e5a8cca 100755 --- a/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_1.sql +++ b/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_1.sql @@ -3,11 +3,7 @@ FROM "grafana"."team_member" tm INNER JOIN "grafana"."team" t ON tm.team_id = t.id INNER JOIN "grafana"."user" u ON tm.user_id = u.id WHERE - t.uid IN( - SELECT uid - FROM "grafana"."team" t - ORDER BY t.id ASC LIMIT 5 - ) -AND tm.org_id = 1 + tm.org_id = 1 AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 5; diff --git a/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_2.sql b/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_2.sql index 2cec052d9da..c6608dd7968 100755 --- a/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_2.sql +++ b/pkg/registry/apis/iam/legacy/testdata/sqlite--team_bindings_query-team_bindings_page_2.sql @@ -3,12 +3,8 @@ FROM "grafana"."team_member" tm INNER JOIN "grafana"."team" t ON tm.team_id = t.id INNER JOIN "grafana"."user" u ON tm.user_id = u.id WHERE - t.uid IN( - SELECT uid - FROM "grafana"."team" t - WHERE t.id >= 2 - ORDER BY t.id ASC LIMIT 1 - ) -AND tm.org_id = 1 + tm.org_id = 1 + AND tm.id >= 2 AND NOT tm.external -ORDER BY t.id ASC; +ORDER BY t.id ASC +LIMIT 1; diff --git 
a/pkg/registry/apis/iam/legacy/testdata/sqlite--update_team-update_team_basic.sql b/pkg/registry/apis/iam/legacy/testdata/sqlite--update_team-update_team_basic.sql new file mode 100755 index 00000000000..7ec26ef6c6a --- /dev/null +++ b/pkg/registry/apis/iam/legacy/testdata/sqlite--update_team-update_team_basic.sql @@ -0,0 +1,7 @@ +UPDATE "grafana"."team" +SET name = 'Team 1', + updated = '2023-01-01 12:00:00', + email = 'team1@example.com', + is_provisioned = TRUE, + external_uid = 'team-1-uid' +WHERE uid = 'team-1' diff --git a/pkg/registry/apis/iam/legacy/update_team.sql b/pkg/registry/apis/iam/legacy/update_team.sql new file mode 100644 index 00000000000..4aa7bb52d7a --- /dev/null +++ b/pkg/registry/apis/iam/legacy/update_team.sql @@ -0,0 +1,7 @@ +UPDATE {{ .Ident .TeamTable }} +SET name = {{ .Arg .Command.Name }}, + updated = {{ .Arg .Command.Updated }}, + email = {{ .Arg .Command.Email }}, + is_provisioned = {{ .Arg .Command.IsProvisioned }}, + external_uid = {{ .Arg .Command.ExternalUID }} +WHERE uid = {{ .Arg .Command.UID }} diff --git a/pkg/registry/apis/iam/register.go b/pkg/registry/apis/iam/register.go index a92744700dc..767ec450bae 100644 --- a/pkg/registry/apis/iam/register.go +++ b/pkg/registry/apis/iam/register.go @@ -2,6 +2,7 @@ package iam import ( "context" + "fmt" "maps" "strings" @@ -152,6 +153,14 @@ func (b *IdentityAccessManagementAPIBuilder) AllowedV0Alpha1Resources() []string func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions) error { storage := map[string]rest.Storage{} + // teams + users must have shorter names because they are often used as part of another name + opts.StorageOptsRegister(iamv0.TeamResourceInfo.GroupResource(), apistore.StorageOptions{ + MaximumNameLength: 80, + }) + opts.StorageOptsRegister(iamv0.UserResourceInfo.GroupResource(), apistore.StorageOptions{ + MaximumNameLength: 80, + }) + teamResource := iamv0.TeamResourceInfo 
teamLegacyStore := team.NewLegacyStore(b.store, b.legacyAccessClient, b.enableAuthnMutation) storage[teamResource.StoragePath()] = teamLegacyStore @@ -345,6 +354,12 @@ func (b *IdentityAccessManagementAPIBuilder) Validate(ctx context.Context, a adm switch typedObj := a.GetObject().(type) { case *iamv0.ResourcePermission: return resourcepermission.ValidateCreateAndUpdateInput(ctx, typedObj) + case *iamv0.Team: + oldTeamObj, ok := a.GetOldObject().(*iamv0.Team) + if !ok { + return fmt.Errorf("expected old object to be a Team, got %T", oldTeamObj) + } + return team.ValidateOnUpdate(ctx, typedObj, oldTeamObj) } return nil case admission.Delete: diff --git a/pkg/registry/apis/iam/team/store.go b/pkg/registry/apis/iam/team/store.go index 88bf50986d6..e048fdb7a6d 100644 --- a/pkg/registry/apis/iam/team/store.go +++ b/pkg/registry/apis/iam/team/store.go @@ -112,7 +112,52 @@ func (s *LegacyStore) Delete(ctx context.Context, name string, deleteValidation // Update implements rest.Updater. func (s *LegacyStore) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { - return nil, false, apierrors.NewMethodNotSupported(resource.GroupResource(), "update") + if !s.enableAuthnMutation { + return nil, false, apierrors.NewMethodNotSupported(resource.GroupResource(), "update") + } + + ns, err := request.NamespaceInfoFrom(ctx, true) + if err != nil { + return nil, false, err + } + + oldObj, err := s.Get(ctx, name, nil) + if err != nil { + return oldObj, false, err + } + + obj, err := objInfo.UpdatedObject(ctx, oldObj) + if err != nil { + return oldObj, false, err + } + + teamObj, ok := obj.(*iamv0alpha1.Team) + if !ok { + return nil, false, fmt.Errorf("expected Team object, got %T", obj) + } + + if updateValidation != nil { + if err := updateValidation(ctx, obj, oldObj); err != nil { + return 
oldObj, false, err + } + } + + updateCmd := legacy.UpdateTeamCommand{ + UID: teamObj.Name, + Name: teamObj.Spec.Title, + Email: teamObj.Spec.Email, + IsProvisioned: teamObj.Spec.Provisioned, + ExternalUID: teamObj.Spec.ExternalUID, + } + + result, err := s.store.UpdateTeam(ctx, ns, updateCmd) + if err != nil { + return oldObj, false, err + } + + iamTeam := toTeamObject(result.Team, ns) + + return &iamTeam, false, nil } func (s *LegacyStore) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) { diff --git a/pkg/registry/apis/iam/team/store_binding.go b/pkg/registry/apis/iam/team/store_binding.go index cdb61777039..38b2a23237f 100644 --- a/pkg/registry/apis/iam/team/store_binding.go +++ b/pkg/registry/apis/iam/team/store_binding.go @@ -117,46 +117,36 @@ func (l *LegacyBindingStore) List(ctx context.Context, options *internalversion. return &list, nil } -func mapToBindingObject(ns claims.NamespaceInfo, b legacy.TeamBinding) iamv0alpha1.TeamBinding { +func mapToBindingObject(ns claims.NamespaceInfo, tm legacy.TeamMember) iamv0alpha1.TeamBinding { rv := time.Time{} ct := time.Now() - for _, m := range b.Members { - if m.Updated.After(rv) { - rv = m.Updated - } - if m.Created.Before(ct) { - ct = m.Created - } + if tm.Updated.After(rv) { + rv = tm.Updated + } + if tm.Created.Before(ct) { + ct = tm.Created } return iamv0alpha1.TeamBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: b.TeamUID, + Name: tm.TeamUID, Namespace: ns.Value, ResourceVersion: strconv.FormatInt(rv.UnixMilli(), 10), CreationTimestamp: metav1.NewTime(ct), }, Spec: iamv0alpha1.TeamBindingSpec{ TeamRef: iamv0alpha1.TeamBindingTeamRef{ - Name: b.TeamUID, + Name: tm.TeamUID, }, - Subjects: mapToSubjects(b.Members), + Subject: iamv0alpha1.TeamBindingspecSubject{ + Name: tm.UserUID, + }, + Permission: common.MapTeamPermission(tm.Permission), }, } } -func mapToSubjects(members []legacy.TeamMember) []iamv0alpha1.TeamBindingspecSubject { - out := 
make([]iamv0alpha1.TeamBindingspecSubject, 0, len(members)) - for _, m := range members { - out = append(out, iamv0alpha1.TeamBindingspecSubject{ - Name: m.UserUID, - Permission: common.MapTeamPermission(m.Permission), - }) - } - return out -} - func mapPermisson(p team.PermissionType) iamv0.TeamPermission { if p == team.PermissionTypeAdmin { return iamv0.TeamPermissionAdmin diff --git a/pkg/registry/apis/iam/team/validate.go b/pkg/registry/apis/iam/team/validate.go index 9205a560019..97777cb3c4e 100644 --- a/pkg/registry/apis/iam/team/validate.go +++ b/pkg/registry/apis/iam/team/validate.go @@ -30,3 +30,28 @@ func ValidateOnCreate(ctx context.Context, obj *iamv0alpha1.Team) error { return nil } + +func ValidateOnUpdate(ctx context.Context, obj, old *iamv0alpha1.Team) error { + requester, err := identity.GetRequester(ctx) + if err != nil { + return apierrors.NewUnauthorized("no identity found") + } + + if obj.Spec.Title == "" { + return apierrors.NewBadRequest("the team must have a title") + } + + if !requester.IsIdentityType(types.TypeServiceAccount) && obj.Spec.Provisioned && !old.Spec.Provisioned { + return apierrors.NewBadRequest("provisioned teams are only allowed for service accounts") + } + + if old.Spec.Provisioned && !obj.Spec.Provisioned { + return apierrors.NewBadRequest("provisioned teams cannot be updated to non-provisioned teams") + } + + if !obj.Spec.Provisioned && obj.Spec.ExternalUID != "" { + return apierrors.NewBadRequest("externalUID is only allowed for provisioned teams") + } + + return nil +} diff --git a/pkg/registry/apis/iam/team/validate_test.go b/pkg/registry/apis/iam/team/validate_test.go index d7acbab4915..1093a01f57d 100644 --- a/pkg/registry/apis/iam/team/validate_test.go +++ b/pkg/registry/apis/iam/team/validate_test.go @@ -115,3 +115,213 @@ func TestValidateOnCreate(t *testing.T) { }) } } + +func TestValidateOnUpdate(t *testing.T) { + tests := []struct { + name string + requester *identity.StaticRequester + obj *iamv0alpha1.Team + 
old *iamv0alpha1.Team + want error + }{ + { + name: "valid update - no changes to provisioned status", + requester: &identity.StaticRequester{ + Type: types.TypeUser, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + }, + }, + want: nil, + }, + { + name: "valid update - service account changing to provisioned", + requester: &identity.StaticRequester{ + Type: types.TypeServiceAccount, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + Provisioned: true, + ExternalUID: "test-uid", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + }, + }, + want: nil, + }, + { + name: "valid update - already provisioned team", + requester: &identity.StaticRequester{ + Type: types.TypeServiceAccount, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + Provisioned: true, + ExternalUID: "updated-uid", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + Provisioned: true, + ExternalUID: "original-uid", + }, + }, + want: nil, + }, + { + name: "invalid update - no title", + requester: &identity.StaticRequester{ + Type: types.TypeUser, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "", + Email: "updated@test.com", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + }, + }, + want: apierrors.NewBadRequest("the team must have a title"), + }, + { + name: "invalid update - user trying to change to provisioned", + 
requester: &identity.StaticRequester{ + Type: types.TypeUser, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + Provisioned: true, + ExternalUID: "test-uid", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + }, + }, + want: apierrors.NewBadRequest("provisioned teams are only allowed for service accounts"), + }, + { + name: "invalid update - changing from provisioned to non-provisioned", + requester: &identity.StaticRequester{ + Type: types.TypeServiceAccount, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + Provisioned: true, + ExternalUID: "original-uid", + }, + }, + want: apierrors.NewBadRequest("provisioned teams cannot be updated to non-provisioned teams"), + }, + { + name: "invalid update - has externalUID but not provisioned", + requester: &identity.StaticRequester{ + Type: types.TypeUser, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + ExternalUID: "test-uid", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + }, + }, + want: apierrors.NewBadRequest("externalUID is only allowed for provisioned teams"), + }, + { + name: "invalid update - no requester in context", + requester: nil, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + }, + }, + want: apierrors.NewUnauthorized("no identity found"), + }, + { + name: "valid 
update - adding externalUID to provisioned team", + requester: &identity.StaticRequester{ + Type: types.TypeServiceAccount, + OrgRole: identity.RoleAdmin, + }, + obj: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "updated title", + Email: "updated@test.com", + Provisioned: true, + ExternalUID: "new-uid", + }, + }, + old: &iamv0alpha1.Team{ + Spec: iamv0alpha1.TeamSpec{ + Title: "original title", + Email: "original@test.com", + Provisioned: true, + }, + }, + want: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := identity.WithRequester(context.Background(), test.requester) + err := ValidateOnUpdate(ctx, test.obj, test.old) + assert.Equal(t, test.want, err) + }) + } +} diff --git a/pkg/registry/apis/ofrep/proxy.go b/pkg/registry/apis/ofrep/proxy.go index efb5cc51930..b5791925b26 100644 --- a/pkg/registry/apis/ofrep/proxy.go +++ b/pkg/registry/apis/ofrep/proxy.go @@ -2,6 +2,7 @@ package ofrep import ( "bytes" + "context" "crypto/tls" "crypto/x509" "encoding/json" @@ -14,14 +15,21 @@ import ( "strconv" "github.com/grafana/grafana/pkg/cmd/grafana-cli/logger" + "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/util/proxyutil" goffmodel "github.com/thomaspoignant/go-feature-flag/cmd/relayproxy/model" ) -func (b *APIBuilder) proxyAllFlagReq(isAuthedUser bool, w http.ResponseWriter, r *http.Request) { +func (b *APIBuilder) proxyAllFlagReq(ctx context.Context, isAuthedUser bool, w http.ResponseWriter, r *http.Request) { + ctx, span := tracer.Start(ctx, "ofrep.proxy.evalAllFlags") + defer span.End() + + r = r.WithContext(ctx) + proxy, err := b.newProxy(ofrepPath) if err != nil { + err = tracing.Error(span, err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -61,9 +69,15 @@ func (b *APIBuilder) proxyAllFlagReq(isAuthedUser bool, w http.ResponseWriter, r proxy.ServeHTTP(w, r) } -func (b *APIBuilder) proxyFlagReq(flagKey string, isAuthedUser bool, w http.ResponseWriter, 
r *http.Request) { +func (b *APIBuilder) proxyFlagReq(ctx context.Context, flagKey string, isAuthedUser bool, w http.ResponseWriter, r *http.Request) { + ctx, span := tracer.Start(ctx, "ofrep.proxy.evalFlag") + defer span.End() + + r = r.WithContext(ctx) + proxy, err := b.newProxy(path.Join(ofrepPath, flagKey)) if err != nil { + err = tracing.Error(span, err) b.logger.Error("Failed to create proxy", "key", flagKey, "error", err) http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/pkg/registry/apis/ofrep/register.go b/pkg/registry/apis/ofrep/register.go index 154e3f6b6fc..b765bbadc32 100644 --- a/pkg/registry/apis/ofrep/register.go +++ b/pkg/registry/apis/ofrep/register.go @@ -10,6 +10,9 @@ import ( "net/url" "github.com/gorilla/mux" + "github.com/grafana/grafana/pkg/infra/tracing" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,6 +34,8 @@ var _ builder.APIGroupBuilder = (*APIBuilder)(nil) var _ builder.APIGroupRouteProvider = (*APIBuilder)(nil) var _ builder.APIGroupVersionProvider = (*APIBuilder)(nil) +var tracer = otel.Tracer("github.com/grafana/grafana/pkg/registry/apis/ofrep") + const ofrepPath = "/ofrep/v1/evaluate/flags" const namespaceMismatchMsg = "rejecting request with namespace mismatch" @@ -240,7 +245,13 @@ func (b *APIBuilder) GetAPIRoutes(gv schema.GroupVersion) *builder.APIRoutes { } func (b *APIBuilder) oneFlagHandler(w http.ResponseWriter, r *http.Request) { + ctx, span := tracer.Start(r.Context(), "ofrep.handler.evalFlag") + defer span.End() + + r = r.WithContext(ctx) + if !b.validateNamespace(r) { + _ = tracing.Errorf(span, namespaceMismatchMsg) b.logger.Error(namespaceMismatchMsg) http.Error(w, namespaceMismatchMsg, http.StatusUnauthorized) return @@ -248,42 +259,54 @@ func (b *APIBuilder) oneFlagHandler(w http.ResponseWriter, r *http.Request) { flagKey := 
mux.Vars(r)["flagKey"] if flagKey == "" { + _ = tracing.Errorf(span, "flagKey parameter is required") http.Error(w, "flagKey parameter is required", http.StatusBadRequest) return } + span.SetAttributes(attribute.String("flag_key", flagKey)) + isAuthedReq := b.isAuthenticatedRequest(r) + span.SetAttributes(attribute.Bool("authenticated", isAuthedReq)) // Unless the request is authenticated, we only allow public flags evaluations if !isAuthedReq && !isPublicFlag(flagKey) { + _ = tracing.Errorf(span, "unauthorized to evaluate flag: %s", flagKey) b.logger.Error("Unauthorized to evaluate flag", "flagKey", flagKey) http.Error(w, "unauthorized to evaluate flag", http.StatusUnauthorized) return } if b.providerType == setting.GOFFProviderType { - b.proxyFlagReq(flagKey, isAuthedReq, w, r) + b.proxyFlagReq(ctx, flagKey, isAuthedReq, w, r) return } - b.evalFlagStatic(flagKey, w, r) + b.evalFlagStatic(ctx, flagKey, w) } func (b *APIBuilder) allFlagsHandler(w http.ResponseWriter, r *http.Request) { + ctx, span := tracer.Start(r.Context(), "ofrep.handler.evalAllFlags") + defer span.End() + + r = r.WithContext(ctx) + if !b.validateNamespace(r) { + _ = tracing.Errorf(span, namespaceMismatchMsg) b.logger.Error(namespaceMismatchMsg) http.Error(w, namespaceMismatchMsg, http.StatusUnauthorized) return } isAuthedReq := b.isAuthenticatedRequest(r) + span.SetAttributes(attribute.Bool("authenticated", isAuthedReq)) if b.providerType == setting.GOFFProviderType { - b.proxyAllFlagReq(isAuthedReq, w, r) + b.proxyAllFlagReq(ctx, isAuthedReq, w, r) return } - b.evalAllFlagsStatic(isAuthedReq, w, r) + b.evalAllFlagsStatic(ctx, isAuthedReq, w) } func writeResponse(statusCode int, result any, logger log.Logger, w http.ResponseWriter) { diff --git a/pkg/registry/apis/ofrep/static.go b/pkg/registry/apis/ofrep/static.go index 8d56104649d..18ff794349d 100644 --- a/pkg/registry/apis/ofrep/static.go +++ b/pkg/registry/apis/ofrep/static.go @@ -1,19 +1,28 @@ package ofrep import ( + "context" "net/http" 
+ "github.com/grafana/grafana/pkg/infra/tracing" goffmodel "github.com/thomaspoignant/go-feature-flag/cmd/relayproxy/model" + "go.opentelemetry.io/otel/attribute" ) -func (b *APIBuilder) evalAllFlagsStatic(isAuthedUser bool, w http.ResponseWriter, r *http.Request) { - result, err := b.staticEvaluator.EvalAllFlags(r.Context()) +func (b *APIBuilder) evalAllFlagsStatic(ctx context.Context, isAuthedUser bool, w http.ResponseWriter) { + _, span := tracer.Start(ctx, "ofrep.static.evalAllFlags") + defer span.End() + + result, err := b.staticEvaluator.EvalAllFlags(ctx) if err != nil { + err = tracing.Error(span, err) b.logger.Error("Failed to evaluate all static flags", "error", err) http.Error(w, "failed to evaluate flags", http.StatusInternalServerError) return } + span.SetAttributes(attribute.Int("total_flags_count", len(result.Flags))) + if !isAuthedUser { var publicOnly []goffmodel.OFREPFlagBulkEvaluateSuccessResponse @@ -24,14 +33,21 @@ func (b *APIBuilder) evalAllFlagsStatic(isAuthedUser bool, w http.ResponseWriter } result.Flags = publicOnly + span.SetAttributes(attribute.Int("public_flags_count", len(publicOnly))) } writeResponse(http.StatusOK, result, b.logger, w) } -func (b *APIBuilder) evalFlagStatic(flagKey string, w http.ResponseWriter, r *http.Request) { - result, err := b.staticEvaluator.EvalFlag(r.Context(), flagKey) +func (b *APIBuilder) evalFlagStatic(ctx context.Context, flagKey string, w http.ResponseWriter) { + _, span := tracer.Start(ctx, "ofrep.static.evalFlag") + defer span.End() + + span.SetAttributes(attribute.String("flag_key", flagKey)) + + result, err := b.staticEvaluator.EvalFlag(ctx, flagKey) if err != nil { + err = tracing.Error(span, err) b.logger.Error("Failed to evaluate static flag", "key", flagKey, "error", err) http.Error(w, "failed to evaluate flag", http.StatusInternalServerError) return diff --git a/pkg/registry/apis/preferences/legacy/preferences.go b/pkg/registry/apis/preferences/legacy/preferences.go index 
e9fd6b32aed..ab7eb2d4891 100644 --- a/pkg/registry/apis/preferences/legacy/preferences.go +++ b/pkg/registry/apis/preferences/legacy/preferences.go @@ -13,11 +13,13 @@ import ( requestK8s "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" + authlib "github.com/grafana/authlib/types" preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1" "github.com/grafana/grafana/pkg/apimachinery/identity" utilsOrig "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/registry/apis/preferences/utils" "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request" + pref "github.com/grafana/grafana/pkg/services/preference" ) var ( @@ -26,13 +28,14 @@ var ( _ rest.Getter = (*preferenceStorage)(nil) _ rest.Lister = (*preferenceStorage)(nil) _ rest.Storage = (*preferenceStorage)(nil) - // _ rest.Creater = (*preferenceStorage)(nil) - // _ rest.Updater = (*preferenceStorage)(nil) - // _ rest.GracefulDeleter = (*preferenceStorage)(nil) + _ rest.Creater = (*preferenceStorage)(nil) + _ rest.Updater = (*preferenceStorage)(nil) + _ rest.GracefulDeleter = (*preferenceStorage)(nil) ) -func NewPreferencesStorage(namespacer request.NamespaceMapper, sql *LegacySQL) *preferenceStorage { +func NewPreferencesStorage(pref pref.Service, namespacer request.NamespaceMapper, sql *LegacySQL) *preferenceStorage { return &preferenceStorage{ + prefs: pref, namespacer: namespacer, sql: sql, tableConverter: preferences.PreferencesResourceInfo.TableConverter(), @@ -43,6 +46,7 @@ type preferenceStorage struct { namespacer request.NamespaceMapper tableConverter rest.TableConvertor sql *LegacySQL + prefs pref.Service } func (s *preferenceStorage) New() runtime.Object { @@ -73,7 +77,7 @@ func (s *preferenceStorage) List(ctx context.Context, options *internalversion.L return nil, err } ns := requestK8s.NamespaceValue(ctx) - if user.GetIsGrafanaAdmin() { + if user.GetIdentityType() == authlib.TypeAccessPolicy { user = nil 
// nill user can see everything } return s.sql.ListPreferences(ctx, ns, user, true) @@ -116,6 +120,151 @@ func (s *preferenceStorage) Get(ctx context.Context, name string, options *metav return nil, preferences.PreferencesResourceInfo.NewNotFound(name) } +func (s *preferenceStorage) save(ctx context.Context, obj runtime.Object) (runtime.Object, error) { + user, err := identity.GetRequester(ctx) + if err != nil { + return nil, err + } + + p, ok := obj.(*preferences.Preferences) + if !ok { + return nil, fmt.Errorf("expected preferences") + } + + owner, ok := utils.ParseOwnerFromName(p.Name) + if !ok { + return nil, fmt.Errorf("invalid name") + } + + cmd := &pref.SavePreferenceCommand{ + OrgID: user.GetOrgID(), + HomeDashboardUID: p.Spec.HomeDashboardUID, + } + if p.Spec.Timezone != nil { + cmd.Timezone = *p.Spec.Timezone + } + if p.Spec.WeekStart != nil { + cmd.WeekStart = *p.Spec.WeekStart + } + if p.Spec.Theme != nil { + cmd.Theme = *p.Spec.Theme + } + if p.Spec.Language != nil { + cmd.Language = *p.Spec.Language + } + if p.Spec.RegionalFormat != nil { + cmd.RegionalFormat = *p.Spec.RegionalFormat + } + if p.Spec.QueryHistory != nil { + cmd.QueryHistory = &pref.QueryHistoryPreference{ + HomeTab: *p.Spec.QueryHistory.HomeTab, + } + } + if p.Spec.Navbar != nil { + cmd.Navbar = &pref.NavbarPreference{ + BookmarkUrls: p.Spec.Navbar.BookmarkUrls, + } + } + if p.Spec.CookiePreferences != nil { + cmd.CookiePreferences = []pref.CookieType{} + if p.Spec.CookiePreferences.Analytics != nil { + cmd.CookiePreferences = append(cmd.CookiePreferences, "analytics") + } + if p.Spec.CookiePreferences.Functional != nil { + cmd.CookiePreferences = append(cmd.CookiePreferences, "functional") + } + if p.Spec.CookiePreferences.Performance != nil { + cmd.CookiePreferences = append(cmd.CookiePreferences, "performance") + } + } + + switch owner.Owner { + case utils.NamespaceResourceOwner: + // the org ID is already set + + case utils.UserResourceOwner: + if user.GetIdentifier() != 
owner.Identifier { + return nil, fmt.Errorf("only the user can save preferences") + } + cmd.UserID, err = user.GetInternalID() + if err != nil { + return nil, err + } + case utils.TeamResourceOwner: + cmd.TeamID, err = s.sql.getLegacyTeamID(ctx, user.GetOrgID(), owner.Identifier) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("unsupported name") + } + + if err = s.prefs.Save(ctx, cmd); err != nil { + return nil, err + } + return s.Get(ctx, owner.AsName(), &metav1.GetOptions{}) +} + +// Create implements rest.Creater. +func (s *preferenceStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { + return s.save(ctx, obj) +} + +// Update implements rest.Updater. +func (s *preferenceStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { + old, err := s.Get(ctx, name, &metav1.GetOptions{}) + if err != nil { + return nil, false, err + } + + obj, err := objInfo.UpdatedObject(ctx, old) + if err != nil { + return nil, false, err + } + + obj, err = s.save(ctx, obj) + return obj, false, err +} + +// Delete implements rest.GracefulDeleter. 
+func (s *preferenceStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { + user, err := identity.GetRequester(ctx) + if err != nil { + return nil, false, err + } + + owner, ok := utils.ParseOwnerFromName(name) + if !ok { + return nil, false, fmt.Errorf("invalid name") + } + + cmd := &pref.DeleteCommand{} + + switch owner.Owner { + case utils.TeamResourceOwner: + cmd.TeamID, err = user.GetInternalID() + if err != nil { + return nil, false, err + } + + case utils.UserResourceOwner: + cmd.UserID, err = user.GetInternalID() + if err != nil { + return nil, false, err + } + + case utils.NamespaceResourceOwner: + cmd.OrgID = user.GetOrgID() + + default: + return nil, false, fmt.Errorf("unsupported owner") + } + + err = s.prefs.Delete(ctx, cmd) + return nil, (err == nil), err +} + func asPreferencesResource(ns string, p *preferenceModel) preferences.Preferences { owner := utils.OwnerReference{} if p.TeamUID.Valid { diff --git a/pkg/registry/apis/preferences/legacy/sql.go b/pkg/registry/apis/preferences/legacy/sql.go index bf3e8900f81..fb4cc60fcbf 100644 --- a/pkg/registry/apis/preferences/legacy/sql.go +++ b/pkg/registry/apis/preferences/legacy/sql.go @@ -49,7 +49,7 @@ func NewLegacySQL(db legacysql.LegacyDatabaseProvider) *LegacySQL { } // NOTE: this does not support paging -- lets check if that will be a problem in cloud -func (s *LegacySQL) GetStars(ctx context.Context, orgId int64, user string) ([]dashboardStars, int64, error) { +func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user string) ([]dashboardStars, int64, error) { var max sql.NullString sql, err := s.db(ctx) if err != nil { @@ -120,7 +120,10 @@ func (s *LegacySQL) GetStars(ctx context.Context, orgId int64, user string) ([]d return nil, 0, fmt.Errorf("unable to get RV %w", err) } if max.Valid && max.String != "" { - fmt.Printf("max RV: %s\n", max.String) + t, _ := 
time.Parse(time.RFC3339, max.String) + if !t.IsZero() { + updated = t + } } else { updated = s.startup } @@ -206,7 +209,10 @@ func (s *LegacySQL) listPreferences(ctx context.Context, return nil, 0, fmt.Errorf("unable to get RV %w", err) } if max.Valid && max.String != "" { - fmt.Printf("max RV: %s\n", max.String) + t, _ := time.Parse(time.RFC3339, max.String) + if !t.IsZero() { + rv.Time = t + } } else { rv.Time = s.startup } @@ -229,7 +235,7 @@ func (s *LegacySQL) ListPreferences(ctx context.Context, ns string, user identit found, rv, err := s.listPreferences(ctx, ns, info.OrgID, func(req *preferencesQuery) (bool, error) { if user != nil { - req.UserUID = user.GetRawIdentifier() + req.UserUID = user.GetIdentifier() teams, err = s.GetTeams(ctx, &identity.StaticRequester{ OrgID: info.OrgID, UserUID: req.UserUID, @@ -243,7 +249,7 @@ func (s *LegacySQL) ListPreferences(ctx context.Context, ns string, user identit return true } if p.UserUID.String != "" { - return user.GetRawIdentifier() == p.UserUID.String + return user.GetIdentifier() == p.UserUID.String } if p.TeamUID.String != "" { return slices.Contains(teams, p.TeamUID.String) @@ -293,3 +299,15 @@ func (s *LegacySQL) GetTeams(ctx context.Context, id authlib.AuthInfo, admin boo err = sess.Select(ctx, &teams, q, req.GetArgs()...) return teams, err } + +func (s *LegacySQL) getLegacyTeamID(ctx context.Context, orgId int64, team string) (int64, error) { + sql, err := s.db(ctx) + if err != nil { + return 0, err + } + + var id int64 + sess := sql.DB.GetSqlxSession() + err = sess.Select(ctx, &id, "SELECT id FROM team WHERE org_id=? 
AND uid=?", orgId, team) + return id, err +} diff --git a/pkg/registry/apis/preferences/legacy/stars.go b/pkg/registry/apis/preferences/legacy/stars.go index 87f5300799d..b048c249170 100644 --- a/pkg/registry/apis/preferences/legacy/stars.go +++ b/pkg/registry/apis/preferences/legacy/stars.go @@ -97,13 +97,13 @@ func (s *DashboardStarsStorage) List(ctx context.Context, options *internalversi return nil, err } - user := userInfo.GetUID() - if userInfo.GetIsGrafanaAdmin() || userInfo.GetIdentityType() == authlib.TypeAccessPolicy { + user := userInfo.GetIdentifier() + if userInfo.GetIdentityType() == authlib.TypeAccessPolicy { user = "" // can see everything } list := &preferences.StarsList{} - found, rv, err := s.sql.GetStars(ctx, ns.OrgID, user) + found, rv, err := s.sql.getDashboardStars(ctx, ns.OrgID, user) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func (s *DashboardStarsStorage) Get(ctx context.Context, name string, options *m return nil, err } - found, _, err := s.sql.GetStars(ctx, ns.OrgID, owner.Identifier) + found, _, err := s.sql.getDashboardStars(ctx, ns.OrgID, owner.Identifier) if err != nil { return nil, err } @@ -187,7 +187,7 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Star }}, err } - current, _, err := s.sql.GetStars(ctx, ns.OrgID, owner.Identifier) + current, _, err := s.sql.getDashboardStars(ctx, ns.OrgID, owner.Identifier) if err != nil { return nil, err } diff --git a/pkg/registry/apis/preferences/merged_preferences.go b/pkg/registry/apis/preferences/preferences_merged.go similarity index 100% rename from pkg/registry/apis/preferences/merged_preferences.go rename to pkg/registry/apis/preferences/preferences_merged.go diff --git a/pkg/registry/apis/preferences/merged_preferences_test.go b/pkg/registry/apis/preferences/preferences_merged_test.go similarity index 100% rename from pkg/registry/apis/preferences/merged_preferences_test.go rename to pkg/registry/apis/preferences/preferences_merged_test.go 
diff --git a/pkg/registry/apis/preferences/register.go b/pkg/registry/apis/preferences/register.go index 2c3d3623c62..ff186c6bdb2 100644 --- a/pkg/registry/apis/preferences/register.go +++ b/pkg/registry/apis/preferences/register.go @@ -72,7 +72,7 @@ func RegisterAPIService( namespacer := request.GetNamespaceMapper(cfg) if prefs != nil { - builder.legacyPrefs = legacy.NewPreferencesStorage(namespacer, sql) + builder.legacyPrefs = legacy.NewPreferencesStorage(prefs, namespacer, sql) } if stars != nil { builder.legacyStars = legacy.NewDashboardStarsStorage(stars, users, namespacer, sql) @@ -112,6 +112,7 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI if err != nil { return err } + stars = &starStorage{store: stars} // wrap List so we only return one value if b.legacyStars != nil && opts.DualWriteBuilder != nil { stars, err = opts.DualWriteBuilder(resource.GroupResource(), b.legacyStars, stars) if err != nil { diff --git a/pkg/registry/apis/preferences/stars.go b/pkg/registry/apis/preferences/stars.go new file mode 100644 index 00000000000..68bbb5c61bd --- /dev/null +++ b/pkg/registry/apis/preferences/stars.go @@ -0,0 +1,104 @@ +package preferences + +import ( + "context" + + "k8s.io/apimachinery/pkg/apis/meta/internalversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/rest" + + authlib "github.com/grafana/authlib/types" + preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1" + "github.com/grafana/grafana/pkg/apimachinery/identity" + grafanarest "github.com/grafana/grafana/pkg/apiserver/rest" +) + +var _ grafanarest.Storage = (*starStorage)(nil) + +type starStorage struct { + store grafanarest.Storage +} + +// When using list, we really just want to get the value for the single user +func (s *starStorage) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) { + user, err := 
identity.GetRequester(ctx) + if err != nil { + return nil, err + } + + switch user.GetIdentityType() { + case authlib.TypeAnonymous: + return s.NewList(), nil + + // Get the single user stars + case authlib.TypeUser: + stars := &preferences.StarsList{} + obj, _ := s.store.Get(ctx, "user-"+user.GetIdentifier(), &v1.GetOptions{}) + if obj != nil { + s, ok := obj.(*preferences.Stars) + if ok { + stars.Items = []preferences.Stars{*s} + } + } + return stars, nil + + default: + return s.store.List(ctx, options) + } +} + +// ConvertToTable implements rest.Storage. +func (s *starStorage) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*v1.Table, error) { + return s.store.ConvertToTable(ctx, obj, tableOptions) +} + +// Create implements rest.Storage. +func (s *starStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *v1.CreateOptions) (runtime.Object, error) { + return s.store.Create(ctx, obj, createValidation, options) +} + +// Delete implements rest.Storage. +func (s *starStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *v1.DeleteOptions) (runtime.Object, bool, error) { + return s.store.Delete(ctx, name, deleteValidation, options) +} + +// DeleteCollection implements rest.Storage. +func (s *starStorage) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *v1.DeleteOptions, listOptions *internalversion.ListOptions) (runtime.Object, error) { + return s.store.DeleteCollection(ctx, deleteValidation, options, listOptions) +} + +// Destroy implements rest.Storage. +func (s *starStorage) Destroy() { + s.store.Destroy() +} + +// Get implements rest.Storage. +func (s *starStorage) Get(ctx context.Context, name string, options *v1.GetOptions) (runtime.Object, error) { + return s.store.Get(ctx, name, options) +} + +// GetSingularName implements rest.Storage. 
+func (s *starStorage) GetSingularName() string { + return s.store.GetSingularName() +} + +// NamespaceScoped implements rest.Storage. +func (s *starStorage) NamespaceScoped() bool { + return s.store.NamespaceScoped() +} + +// New implements rest.Storage. +func (s *starStorage) New() runtime.Object { + return s.store.New() +} + +// NewList implements rest.Storage. +func (s *starStorage) NewList() runtime.Object { + return s.store.NewList() +} + +// Update implements rest.Storage. +func (s *starStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *v1.UpdateOptions) (runtime.Object, bool, error) { + return s.store.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) +} diff --git a/pkg/registry/apis/preferences/update_stars.go b/pkg/registry/apis/preferences/stars_update.go similarity index 100% rename from pkg/registry/apis/preferences/update_stars.go rename to pkg/registry/apis/preferences/stars_update.go diff --git a/pkg/registry/apis/preferences/update_stars_test.go b/pkg/registry/apis/preferences/stars_update_test.go similarity index 100% rename from pkg/registry/apis/preferences/update_stars_test.go rename to pkg/registry/apis/preferences/stars_update_test.go diff --git a/pkg/registry/apis/provisioning/controller/finalizers.go b/pkg/registry/apis/provisioning/controller/finalizers.go index 18f00c17c40..d9c41ee4c15 100644 --- a/pkg/registry/apis/provisioning/controller/finalizers.go +++ b/pkg/registry/apis/provisioning/controller/finalizers.go @@ -120,8 +120,8 @@ func (f *finalizer) processExistingItems( Group: item.Group, Resource: item.Resource, }) - logger.Error("error getting client for resource", "resource", item.Resource, "error", err) if err != nil { + logger.Error("error getting client for resource", "resource", item.Resource, "error", err) return count, err } diff --git 
a/pkg/registry/apis/provisioning/controller/health.go b/pkg/registry/apis/provisioning/controller/health.go index cf3aeedc36d..ed5f19c6ed0 100644 --- a/pkg/registry/apis/provisioning/controller/health.go +++ b/pkg/registry/apis/provisioning/controller/health.go @@ -12,6 +12,13 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +const ( + // recentHealthyDuration defines how recent a health check must be to be considered "recent" when healthy + recentHealthyDuration = 5 * time.Minute + // recentHealthyDuration defines how recent a health check must be to be considered "recent" when unhealthy + recentUnhealthyDuration = 1 * time.Minute +) + // StatusPatcher defines the interface for updating repository status // //go:generate mockery --name=StatusPatcher @@ -23,14 +30,16 @@ type StatusPatcher interface { type HealthChecker struct { statusPatcher StatusPatcher healthMetrics healthMetrics + tester repository.SimpleRepositoryTester } // NewHealthChecker creates a new health checker -func NewHealthChecker(statusPatcher StatusPatcher, registry prometheus.Registerer) *HealthChecker { +func NewHealthChecker(statusPatcher StatusPatcher, registry prometheus.Registerer, tester repository.SimpleRepositoryTester) *HealthChecker { healthMetrics := registerHealthMetrics(registry) return &HealthChecker{ statusPatcher: statusPatcher, healthMetrics: healthMetrics, + tester: tester, } } @@ -58,9 +67,9 @@ func (hc *HealthChecker) hasRecentHealthCheck(healthStatus provisioning.HealthSt age := time.Since(time.UnixMilli(healthStatus.Checked)) if healthStatus.Healthy { - return age <= time.Minute*5 // Recent if checked within 5 minutes when healthy + return age <= recentHealthyDuration } - return age <= time.Minute // Recent if checked within 1 minute when unhealthy + return age <= recentUnhealthyDuration // Recent if checked within 1 minute when unhealthy } // HasRecentFailure checks if there's a recent failure of a specific type @@ -70,7 +79,7 @@ func (hc *HealthChecker) 
HasRecentFailure(healthStatus provisioning.HealthStatus } age := time.Since(time.UnixMilli(healthStatus.Checked)) - return age <= time.Minute // Recent if within 1 minute + return age <= recentUnhealthyDuration } // RecordFailureAndUpdate records a failure and updates the repository status @@ -109,7 +118,11 @@ func (hc *HealthChecker) hasHealthStatusChanged(old, new provisioning.HealthStat return true } - if old.Checked != new.Checked { + recent := recentUnhealthyDuration + if new.Healthy { + recent = recentHealthyDuration + } + if time.UnixMilli(new.Checked).Sub(time.UnixMilli(old.Checked)) > recent { return true } @@ -176,7 +189,7 @@ func (hc *HealthChecker) refreshHealth(ctx context.Context, repo repository.Repo hc.healthMetrics.RecordHealthCheck(outcome, time.Since(start).Seconds()) }() - res, err := repository.TestRepository(ctx, repo) + res, err := hc.tester.TestRepository(ctx, repo) if err != nil { outcome = utils.ErrorOutcome logger.Error("failed to test repository", "error", err) diff --git a/pkg/registry/apis/provisioning/controller/health_test.go b/pkg/registry/apis/provisioning/controller/health_test.go index 74f8a127be6..91a0a67a131 100644 --- a/pkg/registry/apis/provisioning/controller/health_test.go +++ b/pkg/registry/apis/provisioning/controller/health_test.go @@ -13,13 +13,15 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" + repository "github.com/grafana/grafana/apps/provisioning/pkg/repository" "github.com/grafana/grafana/pkg/registry/apis/provisioning/controller/mocks" ) func TestNewHealthChecker(t *testing.T) { mockPatcher := mocks.NewStatusPatcher(t) - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, 
prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) assert.NotNil(t, hc) assert.Equal(t, mockPatcher, hc.statusPatcher) @@ -136,7 +138,8 @@ func TestShouldCheckHealth(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mockPatcher := mocks.NewStatusPatcher(t) - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) result := hc.ShouldCheckHealth(tt.repo) assert.Equal(t, tt.expected, result) @@ -223,7 +226,8 @@ func TestHasRecentFailure(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mockPatcher := mocks.NewStatusPatcher(t) - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) result := hc.HasRecentFailure(tt.healthStatus, tt.failureType) assert.Equal(t, tt.expected, result) @@ -265,7 +269,8 @@ func TestRecordFailure(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mockPatcher := mocks.NewStatusPatcher(t) - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) repo := &provisioning.Repository{ Status: provisioning.RepositoryStatus{ @@ -310,7 +315,8 @@ func 
TestRecordFailure(t *testing.T) { func TestRecordFailureFunction(t *testing.T) { mockPatcher := mocks.NewStatusPatcher(t) - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) testErr := errors.New("test error") result := hc.recordFailure(provisioning.HealthFailureHook, testErr) @@ -394,6 +400,26 @@ func TestRefreshHealth(t *testing.T) { expectedHealth: true, expectPatch: false, }, + { + name: "no status change - no patch needed for unhealthy repo (recent check)", + testResult: &provisioning.TestResults{ + Success: false, + Code: 500, + Errors: []provisioning.ErrorDetails{ + {Detail: "connection failed"}, + {Detail: "timeout"}, + }, + }, + testError: nil, + existingStatus: provisioning.HealthStatus{ + Healthy: false, + Checked: time.Now().Add(-15 * time.Second).UnixMilli(), + Message: []string{"connection failed", "timeout"}, + }, + expectError: false, + expectedHealth: false, + expectPatch: false, + }, { name: "status unchanged but timestamp needs update (old check)", testResult: &provisioning.TestResults{ @@ -409,6 +435,26 @@ func TestRefreshHealth(t *testing.T) { expectedHealth: true, expectPatch: true, }, + { + name: "status unchanged but timestamp needs update (old unhealthy check)", + testResult: &provisioning.TestResults{ + Success: false, + Code: 500, + Errors: []provisioning.ErrorDetails{ + {Detail: "connection failed"}, + {Detail: "timeout"}, + }, + }, + testError: nil, + existingStatus: provisioning.HealthStatus{ + Healthy: false, + Checked: time.Now().Add(-2 * time.Minute).UnixMilli(), + Message: []string{"connection failed", "timeout"}, + }, + expectError: false, + expectedHealth: false, + expectPatch: true, + }, { name: "patch error", testResult: 
&provisioning.TestResults{ @@ -447,7 +493,8 @@ func TestRefreshHealth(t *testing.T) { testError: tt.testError, } - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) if tt.expectPatch { if tt.patchError != nil { @@ -557,7 +604,8 @@ func TestHasHealthStatusChanged(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mockPatcher := mocks.NewStatusPatcher(t) - hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry()) + validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true) + hc := NewHealthChecker(mockPatcher, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator)) result := hc.hasHealthStatusChanged(tt.old, tt.new) assert.Equal(t, tt.expected, result) diff --git a/pkg/registry/apis/provisioning/jobs.go b/pkg/registry/apis/provisioning/jobs.go index 4a344b60192..b1e174a04b7 100644 --- a/pkg/registry/apis/provisioning/jobs.go +++ b/pkg/registry/apis/provisioning/jobs.go @@ -20,16 +20,23 @@ type JobQueueGetter interface { } type jobsConnector struct { - repoGetter RepoGetter - jobs JobQueueGetter - historic jobs.HistoryReader + repoGetter RepoGetter + statusPatcherProvider StatusPatcherProvider + jobs JobQueueGetter + historic jobs.HistoryReader } -func NewJobsConnector(repoGetter RepoGetter, jobs JobQueueGetter, historic jobs.HistoryReader) *jobsConnector { +func NewJobsConnector( + repoGetter RepoGetter, + statusPatcherProvider StatusPatcherProvider, + jobs JobQueueGetter, + historic jobs.HistoryReader, +) *jobsConnector { return &jobsConnector{ - repoGetter: repoGetter, - jobs: jobs, - historic: 
historic, + repoGetter: repoGetter, + statusPatcherProvider: statusPatcherProvider, + jobs: jobs, + historic: historic, } } @@ -125,6 +132,23 @@ func (c *jobsConnector) Connect( } spec.Repository = name + // If a sync job is being created, we should update its status to pending. + if spec.Pull != nil { + err = c.statusPatcherProvider.GetStatusPatcher().Patch(ctx, cfg, map[string]interface{}{ + "op": "replace", + "path": "/status/sync", + "value": &provisioning.SyncStatus{ + State: provisioning.JobStatePending, + LastRef: cfg.Status.Sync.LastRef, + Started: time.Now().UnixMilli(), + }, + }) + if err != nil { + responder.Error(err) + return + } + } + job, err := c.jobs.GetJobQueue().Insert(ctx, cfg.Namespace, spec) if err != nil { responder.Error(err) diff --git a/pkg/registry/apis/provisioning/jobs/export/folders.go b/pkg/registry/apis/provisioning/jobs/export/folders.go index cb3b4a5f059..4b11f03172a 100644 --- a/pkg/registry/apis/provisioning/jobs/export/folders.go +++ b/pkg/registry/apis/provisioning/jobs/export/folders.go @@ -45,11 +45,11 @@ func ExportFolders(ctx context.Context, repoName string, options provisioning.Ex progress.SetMessage(ctx, "write folders to repository") err := repositoryResources.EnsureFolderTreeExists(ctx, options.Branch, options.Path, tree, func(folder resources.Folder, created bool, err error) error { result := jobs.JobResourceResult{ - Action: repository.FileActionCreated, - Name: folder.ID, - Resource: resources.FolderResource.Resource, - Group: resources.FolderResource.Group, - Path: folder.Path, + Action: repository.FileActionCreated, + Name: folder.ID, + Group: resources.FolderResource.Group, + Kind: resources.FolderKind.Kind, + Path: folder.Path, } if err != nil { diff --git a/pkg/registry/apis/provisioning/jobs/export/resources.go b/pkg/registry/apis/provisioning/jobs/export/resources.go index c07a871815a..445f25faca4 100644 --- a/pkg/registry/apis/provisioning/jobs/export/resources.go +++ 
b/pkg/registry/apis/provisioning/jobs/export/resources.go @@ -98,10 +98,10 @@ func exportResource(ctx context.Context, return resources.ForEach(ctx, client, func(item *unstructured.Unstructured) (err error) { gvk := item.GroupVersionKind() result := jobs.JobResourceResult{ - Name: item.GetName(), - Resource: resource, - Group: gvk.Group, - Action: repository.FileActionCreated, + Name: item.GetName(), + Group: gvk.Group, + Kind: gvk.Kind, + Action: repository.FileActionCreated, } // Check if resource is already managed by a repository diff --git a/pkg/registry/apis/provisioning/jobs/loki_history_test.go b/pkg/registry/apis/provisioning/jobs/loki_history_test.go index 026e889431d..26d7f01ab4f 100644 --- a/pkg/registry/apis/provisioning/jobs/loki_history_test.go +++ b/pkg/registry/apis/provisioning/jobs/loki_history_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/grafana/grafana/apps/provisioning/pkg/loki" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -17,6 +16,7 @@ import ( "k8s.io/apimachinery/pkg/types" provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" + "github.com/grafana/grafana/apps/provisioning/pkg/loki" ) func TestLokiJobHistory_WriteJob(t *testing.T) { @@ -86,16 +86,16 @@ func TestLokiJobHistory_WriteJob(t *testing.T) { Errors: []string{"warning: deprecated field used"}, Progress: 100.0, Summary: []*provisioning.JobResourceSummary{{ - Group: "dashboard.grafana.app", - Resource: "dashboards", - Total: 10, - Create: 3, - Update: 5, - Delete: 1, - Write: 8, - Error: 1, - Noop: 0, - Errors: []string{"failed to process dashboard-x"}, + Group: "dashboard.grafana.app", + Kind: "Dashboard", + Total: 10, + Create: 3, + Update: 5, + Delete: 1, + Write: 8, + Error: 1, + Noop: 0, + Errors: []string{"failed to process dashboard-x"}, }}, }, } @@ -178,7 +178,7 @@ func TestLokiJobHistory_WriteJob(t *testing.T) { require.Len(t, 
deserializedJob.Status.Summary, 1) summary := deserializedJob.Status.Summary[0] assert.Equal(t, "dashboard.grafana.app", summary.Group) - assert.Equal(t, "dashboards", summary.Resource) + assert.Equal(t, "Dashboard", summary.Kind) assert.Equal(t, int64(10), summary.Total) assert.Equal(t, int64(3), summary.Create) assert.Equal(t, int64(5), summary.Update) diff --git a/pkg/registry/apis/provisioning/jobs/migrate/clean.go b/pkg/registry/apis/provisioning/jobs/migrate/clean.go index c1c606db71d..c82bbdaea2f 100644 --- a/pkg/registry/apis/provisioning/jobs/migrate/clean.go +++ b/pkg/registry/apis/provisioning/jobs/migrate/clean.go @@ -4,12 +4,13 @@ import ( "context" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) //go:generate mockery --name NamespaceCleaner --structname MockNamespaceCleaner --inpackage --filename mock_namespace_cleaner.go --with-expecter @@ -40,10 +41,10 @@ func (c *namespaceCleaner) Clean(ctx context.Context, namespace string, progress if err = resources.ForEach(ctx, client, func(item *unstructured.Unstructured) error { result := jobs.JobResourceResult{ - Name: item.GetName(), - Resource: item.GetKind(), - Group: item.GroupVersionKind().Group, - Action: repository.FileActionDeleted, + Name: item.GetName(), + Kind: item.GetKind(), + Group: item.GroupVersionKind().Group, + Action: repository.FileActionDeleted, } // Skip provisioned resources - only delete unprovisioned (unmanaged) resources @@ -63,7 +64,7 @@ func (c *namespaceCleaner) Clean(ctx context.Context, namespace string, progress } if err := client.Delete(ctx, item.GetName(), 
metav1.DeleteOptions{}); err != nil { - result.Error = fmt.Errorf("deleting resource %s/%s %s: %w", result.Group, result.Resource, result.Name, err) + result.Error = fmt.Errorf("deleting resource %s/%s %s: %w", result.Group, result.Kind, result.Name, err) progress.Record(ctx, result) return fmt.Errorf("delete resource: %w", err) } diff --git a/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources.go b/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources.go index 5252945cad9..b7b4bb45a4f 100644 --- a/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources.go +++ b/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources.go @@ -204,11 +204,11 @@ func (r *legacyResourceResourceMigrator) Write(ctx context.Context, key *resourc } result := jobs.JobResourceResult{ - Name: parsed.Meta.GetName(), - Resource: r.kind.Resource, - Group: r.kind.Group, - Action: repository.FileActionCreated, - Path: fileName, + Name: parsed.Meta.GetName(), + Group: r.kind.Group, + Kind: parsed.GVK.Kind, + Action: repository.FileActionCreated, + Path: fileName, } if err != nil { diff --git a/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources_test.go b/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources_test.go index 3fc6a01e011..89077848f25 100644 --- a/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources_test.go +++ b/pkg/registry/apis/provisioning/jobs/migrate/legacy_resources_test.go @@ -667,7 +667,7 @@ func TestLegacyResourceResourceMigrator_Write(t *testing.T) { return result.Action == repository.FileActionCreated && result.Name == "test" && result.Error == nil && - result.Resource == "tests" && + result.Kind == "" && // empty kind result.Group == "test.grafana.app" && result.Path == "test/path" })).Return() diff --git a/pkg/registry/apis/provisioning/jobs/progress.go b/pkg/registry/apis/provisioning/jobs/progress.go index 3286ce37a29..f9cea399865 100644 --- a/pkg/registry/apis/provisioning/jobs/progress.go +++ 
b/pkg/registry/apis/provisioning/jobs/progress.go @@ -35,12 +35,12 @@ func maybeNotifyProgress(threshold time.Duration, fn ProgressFn) ProgressFn { // FIXME: ProgressRecorder should be initialized in the queue type JobResourceResult struct { - Name string - Resource string - Group string - Path string - Action repository.FileAction - Error error + Name string + Group string + Kind string + Path string + Action repository.FileAction + Error error } type jobProgressRecorder struct { @@ -73,7 +73,7 @@ func (r *jobProgressRecorder) Record(ctx context.Context, result JobResourceResu r.mu.Lock() r.resultCount++ - logger := logging.FromContext(ctx).With("path", result.Path, "resource", result.Resource, "group", result.Group, "action", result.Action, "name", result.Name) + logger := logging.FromContext(ctx).With("path", result.Path, "group", result.Group, "kind", result.Kind, "action", result.Action, "name", result.Name) if result.Error != nil { logger.Error("job resource operation failed", "err", result.Error) if len(r.errors) < 20 { @@ -173,12 +173,12 @@ func (r *jobProgressRecorder) summary() []*provisioning.JobResourceSummary { func (r *jobProgressRecorder) updateSummary(result JobResourceResult) { // Note: This method is called from Record() which already holds the lock - key := result.Resource + ":" + result.Group + key := result.Group + ":" + result.Kind summary, exists := r.summaries[key] if !exists { summary = &provisioning.JobResourceSummary{ - Resource: result.Resource, - Group: result.Group, + Group: result.Group, + Kind: result.Kind, } r.summaries[key] = summary } diff --git a/pkg/registry/apis/provisioning/jobs/sync/full.go b/pkg/registry/apis/provisioning/jobs/sync/full.go index 0e0b107171a..075f2fbac5f 100644 --- a/pkg/registry/apis/provisioning/jobs/sync/full.go +++ b/pkg/registry/apis/provisioning/jobs/sync/full.go @@ -4,15 +4,16 @@ import ( "context" "fmt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" "github.com/grafana/grafana/apps/provisioning/pkg/safepath" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" ) func FullSync( @@ -92,9 +93,7 @@ func applyChanges(ctx context.Context, changes []ResourceFileChange, clients res deleteSpan.End() continue } - result.Name = change.Existing.Name - result.Resource = change.Existing.Resource result.Group = change.Existing.Group versionlessGVR := schema.GroupVersionResource{ @@ -103,15 +102,17 @@ func applyChanges(ctx context.Context, changes []ResourceFileChange, clients res } // TODO: should we use the clients or the resource manager instead? 
- client, _, err := clients.ForResource(deleteCtx, versionlessGVR) + client, gvk, err := clients.ForResource(deleteCtx, versionlessGVR) if err != nil { + result.Kind = versionlessGVR.Resource // could not find a kind result.Error = fmt.Errorf("get client for deleted object: %w", err) progress.Record(deleteCtx, result) continue } + result.Kind = gvk.Kind if err := client.Delete(deleteCtx, change.Existing.Name, metav1.DeleteOptions{}); err != nil { - result.Error = fmt.Errorf("deleting resource %s/%s %s: %w", change.Existing.Group, change.Existing.Resource, change.Existing.Name, err) + result.Error = fmt.Errorf("deleting resource %s/%s %s: %w", change.Existing.Group, gvk.Kind, change.Existing.Name, err) } progress.Record(deleteCtx, result) deleteSpan.End() @@ -122,10 +123,10 @@ func applyChanges(ctx context.Context, changes []ResourceFileChange, clients res if safepath.IsDir(change.Path) { ensureFolderCtx, ensureFolderSpan := tracer.Start(ctx, "provisioning.sync.full.apply_changes.ensure_folder_exists") result := jobs.JobResourceResult{ - Path: change.Path, - Action: change.Action, - Resource: resources.FolderResource.Resource, - Group: resources.FolderResource.Group, + Path: change.Path, + Action: change.Action, + Group: resources.FolderKind.Group, + Kind: resources.FolderKind.Kind, } folder, err := repositoryResources.EnsureFolderPathExist(ensureFolderCtx, change.Path) @@ -147,11 +148,11 @@ func applyChanges(ctx context.Context, changes []ResourceFileChange, clients res writeCtx, writeSpan := tracer.Start(ctx, "provisioning.sync.full.apply_changes.write_resource_from_file") name, gvk, err := repositoryResources.WriteResourceFromFile(writeCtx, change.Path, "") result := jobs.JobResourceResult{ - Path: change.Path, - Action: change.Action, - Name: name, - Resource: gvk.Kind, - Group: gvk.Group, + Path: change.Path, + Action: change.Action, + Name: name, + Group: gvk.Group, + Kind: gvk.Kind, } if err != nil { diff --git 
a/pkg/registry/apis/provisioning/jobs/sync/full_test.go b/pkg/registry/apis/provisioning/jobs/sync/full_test.go index d5b3b610b80..90e5f4fb0c1 100644 --- a/pkg/registry/apis/provisioning/jobs/sync/full_test.go +++ b/pkg/registry/apis/provisioning/jobs/sync/full_test.go @@ -6,11 +6,6 @@ import ( "fmt" "testing" - provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" - "github.com/grafana/grafana/apps/provisioning/pkg/repository" - "github.com/grafana/grafana/pkg/infra/tracing" - "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" - "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,6 +13,12 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" dynamicfake "k8s.io/client-go/dynamic/fake" k8testing "k8s.io/client-go/testing" + + provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" + "github.com/grafana/grafana/pkg/infra/tracing" + "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" + "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources" ) func TestFullSync_ContextCancelled(t *testing.T) { @@ -209,11 +210,11 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionCreated, - Path: "dashboards/one.json", - Name: "test-dashboard", - Resource: "Dashboard", - Group: "dashboards", + Action: repository.FileActionCreated, + Path: "dashboards/one.json", + Name: "test-dashboard", + Kind: "Dashboard", + Group: "dashboards", }).Return() }, expectedError: "too many errors", @@ -234,11 +235,11 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo 
Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionCreated, - Path: "dashboards/test.json", - Name: "test-dashboard", - Resource: "Dashboard", - Group: "dashboards", + Action: repository.FileActionCreated, + Path: "dashboards/test.json", + Name: "test-dashboard", + Kind: "Dashboard", + Group: "dashboards", }).Return() }, }, @@ -261,7 +262,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo return result.Action == repository.FileActionCreated && result.Path == "dashboards/test.json" && result.Name == "test-dashboard" && - result.Resource == "Dashboard" && + result.Kind == "Dashboard" && result.Group == "dashboards" && result.Error != nil && result.Error.Error() == "writing resource from file dashboards/test.json: write error" @@ -284,11 +285,11 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo Return("test-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionUpdated, - Path: "dashboards/test.json", - Name: "test-dashboard", - Resource: "Dashboard", - Group: "dashboards", + Action: repository.FileActionUpdated, + Path: "dashboards/test.json", + Name: "test-dashboard", + Kind: "Dashboard", + Group: "dashboards", }).Return() }, }, @@ -311,7 +312,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo return result.Action == repository.FileActionUpdated && result.Path == "dashboards/test.json" && result.Name == "test-dashboard" && - result.Resource == "Dashboard" && + result.Kind == "Dashboard" && result.Group == "dashboards" && result.Error != nil && result.Error.Error() == "writing resource from file dashboards/test.json: write error" @@ -335,9 +336,8 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo Action: repository.FileActionCreated, Path: 
"one/two/three/", Name: "some-folder", - // FIXME: this is probably inconsistent across the codebase - Resource: "folders", - Group: "folder.grafana.app", + Kind: "Folder", + Group: "folder.grafana.app", }).Return() }, }, @@ -362,7 +362,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo return result.Action == repository.FileActionCreated && result.Path == "one/two/three/" && result.Name == "" && - result.Resource == "folders" && + result.Kind == "Folder" && result.Group == "folder.grafana.app" && result.Error != nil && result.Error.Error() == "ensuring folder exists at path one/two/three/: folder creation error" @@ -378,7 +378,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo Path: "dashboards/test.json", Existing: &provisioning.ResourceListItem{ Name: "test-dashboard", - Resource: "Dashboard", + Resource: "dashboards", Group: "dashboards", }, }, @@ -411,7 +411,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo clients.On("ForResource", mock.Anything, schema.GroupVersionResource{ Group: "dashboards", - Resource: "Dashboard", + Resource: "dashboards", }).Return(fakeDynamicClient.Resource(resources.DashboardResource), schema.GroupVersionKind{ Kind: "Dashboard", Group: "dashboards", @@ -419,12 +419,12 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo }, nil) progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionDeleted, - Path: "dashboards/test.json", - Name: "test-dashboard", - Resource: "Dashboard", - Group: "dashboards", - Error: nil, + Action: repository.FileActionDeleted, + Path: "dashboards/test.json", + Name: "test-dashboard", + Kind: "Dashboard", + Group: "dashboards", + Error: nil, }).Return() }, }, @@ -437,7 +437,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo Path: "dashboards/test.json", Existing: &provisioning.ResourceListItem{ Name: "test-dashboard", - Resource: "Dashboard", + Resource: "dashboards", Group: "dashboards", }, 
}, @@ -470,7 +470,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo clients.On("ForResource", mock.Anything, schema.GroupVersionResource{ Group: "dashboards", - Resource: "Dashboard", + Resource: "dashboards", }).Return(fakeDynamicClient.Resource(resources.DashboardResource), schema.GroupVersionKind{ Kind: "Dashboard", Group: "dashboards", @@ -481,7 +481,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo return result.Action == repository.FileActionDeleted && result.Path == "dashboards/test.json" && result.Name == "test-dashboard" && - result.Resource == "Dashboard" && + result.Kind == "Dashboard" && result.Group == "dashboards" && result.Error != nil && result.Error.Error() == "deleting resource dashboards/Dashboard test-dashboard: delete failed" @@ -538,7 +538,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo Existing: &provisioning.ResourceListItem{ Name: "test-dashboard", Group: "dashboards", - Resource: "Dashboard", + Resource: "dashboards", }, }, }, @@ -547,16 +547,16 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo clients.On("ForResource", mock.Anything, schema.GroupVersionResource{ Group: "dashboards", - Resource: "Dashboard", + Resource: "dashboards", }).Return(nil, schema.GroupVersionKind{}, errors.New("didn't work")) progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Name: "test-dashboard", - Group: "dashboards", - Resource: "Dashboard", - Action: repository.FileActionDeleted, - Path: "dashboards/test.json", - Error: fmt.Errorf("get client for deleted object: %w", errors.New("didn't work")), + Name: "test-dashboard", + Group: "dashboards", + Kind: "dashboards", // could not find a real kind + Action: repository.FileActionDeleted, + Path: "dashboards/test.json", + Error: fmt.Errorf("get client for deleted object: %w", errors.New("didn't work")), }).Return() }, }, @@ -610,12 +610,12 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo }, nil) 
progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionDeleted, - Path: "to-be-deleted/", - Name: "test-folder", - Resource: "Folder", - Group: "folders", - Error: nil, + Action: repository.FileActionDeleted, + Path: "to-be-deleted/", + Name: "test-folder", + Kind: "Folder", + Group: "folders", + Error: nil, }).Return() }, }, @@ -672,7 +672,7 @@ func TestFullSync_ApplyChanges(t *testing.T) { //nolint:gocyclo return result.Action == repository.FileActionDeleted && result.Path == "to-be-deleted/" && result.Name == "test-folder" && - result.Resource == "Folder" && + result.Kind == "Folder" && result.Group == "folders" && result.Error != nil && result.Error.Error() == "deleting resource folders/Folder test-folder: delete failed" diff --git a/pkg/registry/apis/provisioning/jobs/sync/incremental.go b/pkg/registry/apis/provisioning/jobs/sync/incremental.go index f7448bffa16..0c4ddde659b 100644 --- a/pkg/registry/apis/provisioning/jobs/sync/incremental.go +++ b/pkg/registry/apis/provisioning/jobs/sync/incremental.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "fmt" "github.com/grafana/grafana/apps/provisioning/pkg/repository" @@ -9,6 +10,9 @@ import ( "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + apierrors "k8s.io/apimachinery/pkg/api/errors" ) // Convert git changes into resource file changes @@ -38,6 +42,11 @@ func IncrementalSync(ctx context.Context, repo repository.Versioned, previousRef progress.SetTotal(ctx, len(diff)) progress.SetMessage(ctx, "replicating versioned changes") + // this will keep track of any folders that had resources deleted from it + // with key-value as path:grafana uid. + // after cleaning up all resources, we will look to see if the foldrs are + // now empty, and if so, delete them. 
+ affectedFolders := make(map[string]string) for _, change := range diff { if ctx.Err() != nil { return ctx.Err() @@ -63,11 +72,11 @@ func IncrementalSync(ctx context.Context, repo repository.Versioned, previousRef } progress.Record(ensureFolderCtx, jobs.JobResourceResult{ - Path: safeSegment, - Action: repository.FileActionCreated, - Resource: resources.FolderResource.Resource, - Group: resources.FolderResource.Group, - Name: folder, + Path: safeSegment, + Action: repository.FileActionCreated, + Group: resources.FolderResource.Group, + Kind: resources.FolderKind.Kind, + Name: folder, }) ensureFolderSpan.End() continue @@ -95,30 +104,40 @@ func IncrementalSync(ctx context.Context, repo repository.Versioned, previousRef result.Error = fmt.Errorf("writing resource from file %s: %w", change.Path, err) } result.Name = name - result.Resource = gvk.Kind + result.Kind = gvk.Kind result.Group = gvk.Group writeSpan.End() case repository.FileActionDeleted: removeCtx, removeSpan := tracer.Start(ctx, "provisioning.sync.incremental.remove_resource_from_file") - name, gvk, err := repositoryResources.RemoveResourceFromFile(removeCtx, change.Path, change.PreviousRef) + name, folderName, gvk, err := repositoryResources.RemoveResourceFromFile(removeCtx, change.Path, change.PreviousRef) if err != nil { removeSpan.RecordError(err) result.Error = fmt.Errorf("removing resource from file %s: %w", change.Path, err) } result.Name = name - result.Resource = gvk.Kind + result.Kind = gvk.Kind result.Group = gvk.Group + + if folderName != "" { + affectedFolders[safepath.Dir(change.Path)] = folderName + } + removeSpan.End() case repository.FileActionRenamed: renameCtx, renameSpan := tracer.Start(ctx, "provisioning.sync.incremental.rename_resource_file") - name, gvk, err := repositoryResources.RenameResourceFile(renameCtx, change.PreviousPath, change.PreviousRef, change.Path, change.Ref) + name, oldFolderName, gvk, err := repositoryResources.RenameResourceFile(renameCtx, change.PreviousPath, 
change.PreviousRef, change.Path, change.Ref) if err != nil { renameSpan.RecordError(err) result.Error = fmt.Errorf("renaming resource file from %s to %s: %w", change.PreviousPath, change.Path, err) } result.Name = name - result.Resource = gvk.Kind + result.Kind = gvk.Kind result.Group = gvk.Group + + if oldFolderName != "" { + affectedFolders[safepath.Dir(change.Path)] = oldFolderName + } + renameSpan.End() case repository.FileActionIgnored: // do nothing @@ -128,5 +147,50 @@ func IncrementalSync(ctx context.Context, repo repository.Versioned, previousRef progress.SetMessage(ctx, "versioned changes replicated") + if len(affectedFolders) > 0 { + span.AddEvent("checking if impacted folders should be deleted", trace.WithAttributes(attribute.Int("affected_folders", len(affectedFolders)))) + if err := cleanupOrphanedFolders(ctx, repo, affectedFolders, repositoryResources, tracer); err != nil { + return tracing.Error(span, fmt.Errorf("cleanup orphaned folders: %w", err)) + } + } + + return nil +} + +// cleanupOrphanedFolders removes folders that no longer contain any resources in git after deletions have occurred. 
+func cleanupOrphanedFolders( + ctx context.Context, + repo repository.Versioned, + affectedFolders map[string]string, + repositoryResources resources.RepositoryResources, + tracer tracing.Tracer, +) error { + ctx, span := tracer.Start(ctx, "provisioning.sync.incremental.cleanup_orphaned_folders") + defer span.End() + + readerRepo, ok := repo.(repository.Reader) + if !ok { + span.RecordError(fmt.Errorf("repository does not implement Reader")) + return nil + } + + for path, folderName := range affectedFolders { + span.SetAttributes(attribute.String("folder", folderName)) + + // if we can no longer find the folder in git, then we can delete it from grafana + _, err := readerRepo.Read(ctx, path, "") + if err != nil && (errors.Is(err, repository.ErrFileNotFound) || apierrors.IsNotFound(err)) { + span.AddEvent("folder not found in git, removing from grafana") + if err := repositoryResources.RemoveFolder(ctx, folderName); err != nil { + span.RecordError(err) + } else { + span.AddEvent("successfully deleted") + } + continue + } + + span.AddEvent("folder still exists in git, continuing") + } + return nil } diff --git a/pkg/registry/apis/provisioning/jobs/sync/incremental_test.go b/pkg/registry/apis/provisioning/jobs/sync/incremental_test.go index 6562be15f3c..95d595e8c6c 100644 --- a/pkg/registry/apis/provisioning/jobs/sync/incremental_test.go +++ b/pkg/registry/apis/provisioning/jobs/sync/incremental_test.go @@ -5,13 +5,14 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/registry/apis/provisioning/jobs" "github.com/grafana/grafana/pkg/registry/apis/provisioning/resources" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/runtime/schema" ) func TestIncrementalSync_ContextCancelled(t 
*testing.T) { @@ -131,11 +132,11 @@ func TestIncrementalSync(t *testing.T) { // Mock progress recording progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionCreated, - Path: "unsupported/path/", - Resource: resources.FolderResource.Resource, - Group: resources.FolderResource.Group, - Name: "test-folder", + Action: repository.FileActionCreated, + Path: "unsupported/path/", + Kind: resources.FolderKind.Kind, + Group: resources.FolderResource.Group, + Name: "test-folder", }).Return() progress.On("TooManyErrors").Return(nil) @@ -186,15 +187,15 @@ func TestIncrementalSync(t *testing.T) { // Mock resource deletion repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref"). - Return("old-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) + Return("old-dashboard", "", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) // Mock progress recording progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionDeleted, - Path: "dashboards/old.json", - Name: "old-dashboard", - Resource: "Dashboard", - Group: "dashboards", + Action: repository.FileActionDeleted, + Path: "dashboards/old.json", + Name: "old-dashboard", + Kind: "Dashboard", + Group: "dashboards", }).Return() progress.On("TooManyErrors").Return(nil) @@ -222,15 +223,15 @@ func TestIncrementalSync(t *testing.T) { // Mock resource rename repoResources.On("RenameResourceFile", mock.Anything, "dashboards/old.json", "old-ref", "dashboards/new.json", "new-ref"). 
- Return("renamed-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) + Return("renamed-dashboard", "", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) // Mock progress recording progress.On("Record", mock.Anything, jobs.JobResourceResult{ - Action: repository.FileActionRenamed, - Path: "dashboards/new.json", - Name: "renamed-dashboard", - Resource: "Dashboard", - Group: "dashboards", + Action: repository.FileActionRenamed, + Path: "dashboards/new.json", + Name: "renamed-dashboard", + Kind: "Dashboard", + Group: "dashboards", }).Return() progress.On("TooManyErrors").Return(nil) @@ -310,7 +311,7 @@ func TestIncrementalSync(t *testing.T) { return result.Action == repository.FileActionCreated && result.Path == "dashboards/test.json" && result.Name == "test-dashboard" && - result.Resource == "Dashboard" && + result.Kind == "Dashboard" && result.Group == "dashboards" && result.Error != nil && result.Error.Error() == "writing resource from file dashboards/test.json: write failed" @@ -339,14 +340,14 @@ func TestIncrementalSync(t *testing.T) { // Mock resource deletion error repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref"). 
- Return("old-dashboard", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, fmt.Errorf("delete failed")) + Return("old-dashboard", "", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, fmt.Errorf("delete failed")) // Mock progress recording with error progress.On("Record", mock.Anything, mock.MatchedBy(func(result jobs.JobResourceResult) bool { return result.Action == repository.FileActionDeleted && result.Path == "dashboards/old.json" && result.Name == "old-dashboard" && - result.Resource == "Dashboard" && + result.Kind == "Dashboard" && result.Group == "dashboards" && result.Error != nil && result.Error.Error() == "removing resource from file dashboards/old.json: delete failed" @@ -397,3 +398,126 @@ func TestIncrementalSync(t *testing.T) { }) } } + +type compositeRepo struct { + *repository.MockVersioned + *repository.MockReader +} + +func TestIncrementalSync_CleanupOrphanedFolders(t *testing.T) { + tests := []struct { + name string + setupMocks func(*compositeRepo, *resources.MockRepositoryResources, *jobs.MockJobProgressRecorder) + expectedError string + }{ + { + name: "delete folder when it no longer exists in git", + setupMocks: func(repo *compositeRepo, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) { + changes := []repository.VersionedFileChange{ + { + Action: repository.FileActionDeleted, + Path: "dashboards/old.json", + PreviousRef: "old-ref", + }, + } + repo.MockVersioned.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil) + progress.On("SetTotal", mock.Anything, 1).Return() + progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return() + progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return() + repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref"). 
+ Return("old-dashboard", "folder-uid", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) + + // if the folder is not found in git, there should be a call to remove the folder from grafana + repo.MockReader.On("Read", mock.Anything, "dashboards/", ""). + Return((*repository.FileInfo)(nil), repository.ErrFileNotFound) + repoResources.On("RemoveFolder", mock.Anything, "folder-uid").Return(nil) + + progress.On("Record", mock.Anything, mock.Anything).Return() + progress.On("TooManyErrors").Return(nil) + }, + }, + { + name: "keep folder when it still exists in git", + setupMocks: func(repo *compositeRepo, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) { + changes := []repository.VersionedFileChange{ + { + Action: repository.FileActionDeleted, + Path: "dashboards/old.json", + PreviousRef: "old-ref", + }, + } + repo.MockVersioned.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil) + progress.On("SetTotal", mock.Anything, 1).Return() + progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return() + progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return() + repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref"). + Return("old-dashboard", "folder-uid", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) + // if the folder still exists in git, there should not be a call to delete it from grafana + repo.MockReader.On("Read", mock.Anything, "dashboards/", ""). 
+ Return(&repository.FileInfo{}, nil) + + progress.On("Record", mock.Anything, mock.Anything).Return() + progress.On("TooManyErrors").Return(nil) + }, + }, + { + name: "delete multiple folders when they no longer exist in git", + setupMocks: func(repo *compositeRepo, repoResources *resources.MockRepositoryResources, progress *jobs.MockJobProgressRecorder) { + changes := []repository.VersionedFileChange{ + { + Action: repository.FileActionDeleted, + Path: "dashboards/old.json", + PreviousRef: "old-ref", + }, + { + Action: repository.FileActionDeleted, + Path: "alerts/old-alert.yaml", + PreviousRef: "old-ref", + }, + } + repo.MockVersioned.On("CompareFiles", mock.Anything, "old-ref", "new-ref").Return(changes, nil) + progress.On("SetTotal", mock.Anything, 2).Return() + progress.On("SetMessage", mock.Anything, "replicating versioned changes").Return() + progress.On("SetMessage", mock.Anything, "versioned changes replicated").Return() + repoResources.On("RemoveResourceFromFile", mock.Anything, "dashboards/old.json", "old-ref"). + Return("old-dashboard", "folder-uid-1", schema.GroupVersionKind{Kind: "Dashboard", Group: "dashboards"}, nil) + repoResources.On("RemoveResourceFromFile", mock.Anything, "alerts/old-alert.yaml", "old-ref"). + Return("old-alert", "folder-uid-2", schema.GroupVersionKind{Kind: "Alert", Group: "alerts"}, nil) + + // both not found in git, both should be deleted + repo.MockReader.On("Read", mock.Anything, "dashboards/", ""). + Return((*repository.FileInfo)(nil), repository.ErrFileNotFound) + repo.MockReader.On("Read", mock.Anything, "alerts/", ""). 
+ Return((*repository.FileInfo)(nil), repository.ErrFileNotFound) + repoResources.On("RemoveFolder", mock.Anything, "folder-uid-1").Return(nil) + repoResources.On("RemoveFolder", mock.Anything, "folder-uid-2").Return(nil) + + progress.On("Record", mock.Anything, mock.Anything).Return() + progress.On("TooManyErrors").Return(nil) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockVersioned := repository.NewMockVersioned(t) + mockReader := repository.NewMockReader(t) + repo := &compositeRepo{ + MockVersioned: mockVersioned, + MockReader: mockReader, + } + repoResources := resources.NewMockRepositoryResources(t) + progress := jobs.NewMockJobProgressRecorder(t) + + tt.setupMocks(repo, repoResources, progress) + + err := IncrementalSync(context.Background(), repo, "old-ref", "new-ref", repoResources, progress, tracing.NewNoopTracerService()) + + if tt.expectedError != "" { + require.EqualError(t, err, tt.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/registry/apis/provisioning/jobs/sync/worker.go b/pkg/registry/apis/provisioning/jobs/sync/worker.go index 0b0139941c0..c5df4553e04 100644 --- a/pkg/registry/apis/provisioning/jobs/sync/worker.go +++ b/pkg/registry/apis/provisioning/jobs/sync/worker.go @@ -110,6 +110,10 @@ func (r *SyncWorker) Process(ctx context.Context, repo repository.Repository, jo lastRef := repo.Config().Status.Sync.LastRef syncStatus.LastRef = lastRef + if syncStatus.State == "" { + syncStatus.State = provisioning.JobStateWorking + } + // Update sync status at start using JSON patch patchOperations := []map[string]interface{}{ { @@ -135,15 +139,17 @@ func (r *SyncWorker) Process(ctx context.Context, repo repository.Repository, jo if err != nil { setupSpan.End() logger.Error("failed to create repository resources client", "error", err) - err = fmt.Errorf("create repository resources client: %w", err) - return tracing.Error(span, err) + setupError := fmt.Errorf("create repository 
resources client: %w", err) + progress.Complete(ctx, setupError) + return tracing.Error(span, setupError) } clients, err := r.clients.Clients(setupCtx, cfg.Namespace) if err != nil { setupSpan.End() logger.Error("failed to get clients for the repository", "error", err) - err = fmt.Errorf("get clients for %s: %w", cfg.Name, err) - return tracing.Error(span, err) + setupError := fmt.Errorf("get clients for %s: %w", cfg.Name, err) + progress.Complete(ctx, setupError) + return tracing.Error(span, setupError) } setupSpan.End() diff --git a/pkg/registry/apis/provisioning/jobs/sync/worker_test.go b/pkg/registry/apis/provisioning/jobs/sync/worker_test.go index 090d4deac87..bd8b094b4f6 100644 --- a/pkg/registry/apis/provisioning/jobs/sync/worker_test.go +++ b/pkg/registry/apis/provisioning/jobs/sync/worker_test.go @@ -153,10 +153,15 @@ func TestSyncWorker_Process(t *testing.T) { // Initial status update succeeds pr.On("SetMessage", mock.Anything, "update sync status at start").Return() - rpf.On("Execute", mock.Anything, repoConfig, mock.Anything).Return(nil) + rpf.On("Execute", mock.Anything, repoConfig, mock.Anything).Return(nil).Once() // Repository resources creation fails rrf.On("Client", mock.Anything, mock.Anything).Return(nil, errors.New("failed to create repository resources client")) + + // Progress.Complete should be called with the error + pr.On("Complete", mock.Anything, mock.MatchedBy(func(err error) bool { + return err != nil && err.Error() == "create repository resources client: failed to create repository resources client" + })).Return(provisioning.JobStatus{State: provisioning.JobStateError}) }, expectedError: "create repository resources client: failed to create repository resources client", }, @@ -185,13 +190,18 @@ func TestSyncWorker_Process(t *testing.T) { // Initial status update succeeds pr.On("SetMessage", mock.Anything, "update sync status at start").Return() - rpf.On("Execute", mock.Anything, repoConfig, mock.Anything).Return(nil) + 
rpf.On("Execute", mock.Anything, repoConfig, mock.Anything).Return(nil).Once() // Repository resources creation succeeds rrf.On("Client", mock.Anything, mock.Anything).Return(&resources.MockRepositoryResources{}, nil) // Getting clients for namespace fails cf.On("Clients", mock.Anything, "test-namespace").Return(nil, errors.New("failed to get clients")) + + // Progress.Complete should be called with the error + pr.On("Complete", mock.Anything, mock.MatchedBy(func(err error) bool { + return err != nil && err.Error() == "get clients for test-repo: failed to get clients" + })).Return(provisioning.JobStatus{State: provisioning.JobStateError}) }, expectedError: "get clients for test-repo: failed to get clients", }, diff --git a/pkg/registry/apis/provisioning/register.go b/pkg/registry/apis/provisioning/register.go index a63f42bb987..2aeb926be21 100644 --- a/pkg/registry/apis/provisioning/register.go +++ b/pkg/registry/apis/provisioning/register.go @@ -6,7 +6,6 @@ import ( "fmt" "net/http" "net/url" - "slices" "strings" "time" @@ -116,6 +115,7 @@ type APIBuilder struct { access authlib.AccessChecker statusPatcher *appcontroller.RepositoryStatusPatcher healthChecker *controller.HealthChecker + validator repository.RepositoryValidator // Extras provides additional functionality to the API. 
extras []Extra extraWorkers []jobs.Worker @@ -144,6 +144,7 @@ func NewAPIBuilder( allowedTargets []provisioning.SyncTargetType, restConfigGetter func(context.Context) (*clientrest.Config, error), allowImageRendering bool, + minSyncInterval time.Duration, registry prometheus.Registerer, newStandaloneClientFactoryFunc func(loopbackConfigProvider apiserver.RestConfigProvider) resources.ClientFactory, // optional, only used for standalone apiserver ) *APIBuilder { @@ -172,10 +173,11 @@ func NewAPIBuilder( access: access, jobHistoryConfig: jobHistoryConfig, extraWorkers: extraWorkers, - allowedTargets: allowedTargets, restConfigGetter: restConfigGetter, + allowedTargets: allowedTargets, allowImageRendering: allowImageRendering, registry: registry, + validator: repository.NewValidator(minSyncInterval, allowedTargets, allowImageRendering), } for _, builder := range extraBuilders { @@ -261,6 +263,7 @@ func RegisterAPIService( allowedTargets, nil, // will use loopback instead cfg.ProvisioningAllowImageRendering, + cfg.ProvisioningMinSyncInterval, reg, nil, ) @@ -287,7 +290,7 @@ func (b *APIBuilder) GetAuthorizer() authorizer.Authorizer { Name: a.GetName(), Namespace: a.GetNamespace(), Subresource: a.GetSubresource(), - }) + }, "") if err != nil { return authorizer.DecisionDeny, "failed to perform authorization", err } @@ -475,7 +478,7 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI storage[provisioning.RepositoryResourceInfo.StoragePath("status")] = repositoryStatusStorage // TODO: Add some logic so that the connectors can registered themselves and we don't have logic all over the place - storage[provisioning.RepositoryResourceInfo.StoragePath("test")] = NewTestConnector(b) + storage[provisioning.RepositoryResourceInfo.StoragePath("test")] = NewTestConnector(b, repository.NewRepositoryTesterWithExistingChecker(repository.NewSimpleRepositoryTester(b.validator), b.VerifyAgainstExistingRepositories)) 
storage[provisioning.RepositoryResourceInfo.StoragePath("files")] = NewFilesConnector(b, b.parsers, b.clients, b.access) storage[provisioning.RepositoryResourceInfo.StoragePath("refs")] = NewRefsConnector(b) storage[provisioning.RepositoryResourceInfo.StoragePath("resources")] = &listConnector{ @@ -485,7 +488,7 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI storage[provisioning.RepositoryResourceInfo.StoragePath("history")] = &historySubresource{ repoGetter: b, } - storage[provisioning.RepositoryResourceInfo.StoragePath("jobs")] = NewJobsConnector(b, b, jobHistory) + storage[provisioning.RepositoryResourceInfo.StoragePath("jobs")] = NewJobsConnector(b, b, b, jobHistory) // Add any extra storage for _, extra := range b.extras { @@ -576,24 +579,14 @@ func (b *APIBuilder) Validate(ctx context.Context, a admission.Attributes, o adm return err } - list := repository.ValidateRepository(repo) + // ALL configuration validations should be done in ValidateRepository - + // this is how the UI is able to show proper validation errors + // + // the only time to add configuration checks here is if you need to compare + // the incoming change to the current configuration + list := b.validator.ValidateRepository(repo) cfg := repo.Config() - if !slices.Contains(b.allowedTargets, cfg.Spec.Sync.Target) { - list = append(list, - field.Invalid( - field.NewPath("spec", "target"), - cfg.Spec.Sync.Target, - "sync target is not supported")) - } - - if !b.allowImageRendering && cfg.Spec.GitHub != nil && cfg.Spec.GitHub.GenerateDashboardPreviews { - list = append(list, - field.Invalid(field.NewPath("spec", "generateDashboardPreviews"), - cfg.Spec.GitHub.GenerateDashboardPreviews, - "image rendering is not enabled")) - } - if a.GetOperation() == admission.Update { oldRepo, err := b.asRepository(ctx, a.GetOldObject(), nil) if err != nil { @@ -669,7 +662,7 @@ func (b *APIBuilder) GetPostStartHooks() (map[string]genericapiserver.PostStartH } b.statusPatcher = 
appcontroller.NewRepositoryStatusPatcher(b.GetClient()) - b.healthChecker = controller.NewHealthChecker(b.statusPatcher, b.registry) + b.healthChecker = controller.NewHealthChecker(b.statusPatcher, b.registry, repository.NewSimpleRepositoryTester(b.validator)) // if running solely CRUD, skip the rest of the setup if b.onlyApiServer { diff --git a/pkg/registry/apis/provisioning/register_validate_test.go b/pkg/registry/apis/provisioning/register_validate_test.go new file mode 100644 index 00000000000..18b366e4de8 --- /dev/null +++ b/pkg/registry/apis/provisioning/register_validate_test.go @@ -0,0 +1,115 @@ +package provisioning + +import ( + "context" + "testing" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + + "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" + "github.com/grafana/grafana/apps/provisioning/pkg/repository" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestAPIBuilderValidate(t *testing.T) { + factory := repository.NewMockFactory(t) + mockRepo := repository.NewMockConfigRepository(t) + mockRepo.EXPECT().Validate().Return(nil) + factory.EXPECT().Build(mock.Anything, mock.Anything).Return(mockRepo, nil) + validator := repository.NewValidator(30*time.Second, []v0alpha1.SyncTargetType{v0alpha1.SyncTargetTypeFolder}, false) + b := &APIBuilder{ + repoFactory: factory, + allowedTargets: []v0alpha1.SyncTargetType{v0alpha1.SyncTargetTypeFolder}, + allowImageRendering: false, + validator: validator, + } + + t.Run("min sync interval is less than 10 seconds", func(t *testing.T) { + cfg := &v0alpha1.Repository{ + Spec: v0alpha1.RepositorySpec{ + Title: "repo", + Type: v0alpha1.GitHubRepositoryType, + Sync: v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeFolder, IntervalSeconds: 5}, + }, + } + 
mockRepo.EXPECT().Config().Return(cfg) + + obj := newRepoObj("repo1", "default", cfg.Spec, v0alpha1.RepositoryStatus{}) + err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil) + require.Error(t, err) + require.True(t, apierrors.IsInvalid(err)) + }) + + t.Run("image rendering is not enabled", func(t *testing.T) { + cfg2 := &v0alpha1.Repository{ + Spec: v0alpha1.RepositorySpec{ + Title: "repo", + Type: v0alpha1.GitHubRepositoryType, + Sync: v0alpha1.SyncOptions{Enabled: false, Target: v0alpha1.SyncTargetTypeFolder}, + GitHub: &v0alpha1.GitHubRepositoryConfig{URL: "https://github.com/acme/repo", Branch: "main", GenerateDashboardPreviews: true}, + }, + } + mockRepo.EXPECT().Config().Return(cfg2) + + obj := newRepoObj("repo2", "default", cfg2.Spec, v0alpha1.RepositoryStatus{}) + err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil) + require.Error(t, err) + require.True(t, apierrors.IsInvalid(err)) + }) + + t.Run("sync target is not supported", func(t *testing.T) { + cfg3 := &v0alpha1.Repository{ + Spec: v0alpha1.RepositorySpec{ + Title: "repo", + Type: v0alpha1.GitHubRepositoryType, + Sync: v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeInstance}, + }, + } + mockRepo.EXPECT().Config().Return(cfg3) + + obj := newRepoObj("repo3", "default", cfg3.Spec, v0alpha1.RepositoryStatus{}) + err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil) + require.Error(t, err) + require.True(t, apierrors.IsInvalid(err)) + }) +} + +func newRepoObj(name string, ns string, spec v0alpha1.RepositorySpec, status v0alpha1.RepositoryStatus) *v0alpha1.Repository { + return &v0alpha1.Repository{ + TypeMeta: metav1.TypeMeta{APIVersion: v0alpha1.APIVERSION, Kind: "Repository"}, + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: spec, + Status: status, + } +} + +func newAttributes(obj, old runtime.Object, op admission.Operation) admission.Attributes { + return 
admission.NewAttributesRecord( + obj, + old, + v0alpha1.RepositoryResourceInfo.GroupVersionKind(), + "default", + func() string { + if obj != nil { + return obj.(*v0alpha1.Repository).Name + } + if old != nil { + return old.(*v0alpha1.Repository).Name + } + return "" + }(), + v0alpha1.RepositoryResourceInfo.GroupVersionResource(), + "", + op, + nil, + false, + &user.DefaultInfo{}, + ) +} diff --git a/pkg/registry/apis/provisioning/resources/client.go b/pkg/registry/apis/provisioning/resources/client.go index 6a68a8add31..f12bcf7b053 100644 --- a/pkg/registry/apis/provisioning/resources/client.go +++ b/pkg/registry/apis/provisioning/resources/client.go @@ -22,6 +22,7 @@ import ( var ( UserResource = iam.UserResourceInfo.GroupVersionResource() FolderResource = folders.FolderResourceInfo.GroupVersionResource() + FolderKind = folders.FolderResourceInfo.GroupVersionKind() DashboardResource = dashboardV1.DashboardResourceInfo.GroupVersionResource() DashboardResourceV2alpha1 = dashboardV2alpha1.DashboardResourceInfo.GroupVersionResource() DashboardResourceV2beta1 = dashboardV2beta1.DashboardResourceInfo.GroupVersionResource() diff --git a/pkg/registry/apis/provisioning/resources/dualwriter.go b/pkg/registry/apis/provisioning/resources/dualwriter.go index b9af3416364..052771af700 100644 --- a/pkg/registry/apis/provisioning/resources/dualwriter.go +++ b/pkg/registry/apis/provisioning/resources/dualwriter.go @@ -515,9 +515,8 @@ func (r *DualReadWriter) authorize(ctx context.Context, parsed *ParsedResource, Resource: parsed.GVR.Resource, Namespace: id.GetNamespace(), Name: name, - Folder: parsed.Meta.GetFolder(), Verb: verb, - }) + }, parsed.Meta.GetFolder()) if err != nil || !rsp.Allowed { return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(), fmt.Errorf("no access to read the embedded file")) diff --git a/pkg/registry/apis/provisioning/resources/folders.go b/pkg/registry/apis/provisioning/resources/folders.go index ba6388b31cf..b74ab67976a 100644 
--- a/pkg/registry/apis/provisioning/resources/folders.go +++ b/pkg/registry/apis/provisioning/resources/folders.go @@ -175,6 +175,10 @@ func (fm *FolderManager) GetFolder(ctx context.Context, name string) (*unstructu return fm.client.Get(ctx, name, metav1.GetOptions{}) } +func (fm *FolderManager) RemoveFolder(ctx context.Context, name string) error { + return fm.client.Delete(ctx, name, metav1.DeleteOptions{}) +} + // ReplicateTree replicates the folder tree to the repository. // The function fn is called for each folder. // If the folder already exists, the function is called with created set to false. diff --git a/pkg/registry/apis/provisioning/resources/repository.go b/pkg/registry/apis/provisioning/resources/repository.go index 67236d93cd5..6a9e16fc5a2 100644 --- a/pkg/registry/apis/provisioning/resources/repository.go +++ b/pkg/registry/apis/provisioning/resources/repository.go @@ -28,13 +28,14 @@ type RepositoryResources interface { EnsureFolderPathExist(ctx context.Context, filePath string) (parent string, err error) EnsureFolderExists(ctx context.Context, folder Folder, parentID string) error EnsureFolderTreeExists(ctx context.Context, ref, path string, tree FolderTree, fn func(folder Folder, created bool, err error) error) error + RemoveFolder(ctx context.Context, folderName string) error // File from Resource WriteResourceFileFromObject(ctx context.Context, obj *unstructured.Unstructured, options WriteOptions) (string, error) // Resource from file WriteResourceFromFile(ctx context.Context, path, ref string) (string, schema.GroupVersionKind, error) - RemoveResourceFromFile(ctx context.Context, path, ref string) (string, schema.GroupVersionKind, error) + RemoveResourceFromFile(ctx context.Context, path, ref string) (string, string, schema.GroupVersionKind, error) FindResourcePath(ctx context.Context, name string, gvk schema.GroupVersionKind) (string, error) - RenameResourceFile(ctx context.Context, path, previousRef, newPath, newRef string) (string, 
schema.GroupVersionKind, error) + RenameResourceFile(ctx context.Context, path, previousRef, newPath, newRef string) (string, string, schema.GroupVersionKind, error) // Stats Stats(ctx context.Context) (*provisioning.ResourceStats, error) List(ctx context.Context) (*provisioning.ResourceList, error) diff --git a/pkg/registry/apis/provisioning/resources/repository_resources_mock.go b/pkg/registry/apis/provisioning/resources/repository_resources_mock.go index 978d91f1d4d..1628971ff9e 100644 --- a/pkg/registry/apis/provisioning/resources/repository_resources_mock.go +++ b/pkg/registry/apis/provisioning/resources/repository_resources_mock.go @@ -297,8 +297,55 @@ func (_c *MockRepositoryResources_List_Call) RunAndReturn(run func(context.Conte return _c } +// RemoveFolder provides a mock function with given fields: ctx, folderName +func (_m *MockRepositoryResources) RemoveFolder(ctx context.Context, folderName string) error { + ret := _m.Called(ctx, folderName) + + if len(ret) == 0 { + panic("no return value specified for RemoveFolder") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, folderName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockRepositoryResources_RemoveFolder_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveFolder' +type MockRepositoryResources_RemoveFolder_Call struct { + *mock.Call +} + +// RemoveFolder is a helper method to define mock.On call +// - ctx context.Context +// - folderName string +func (_e *MockRepositoryResources_Expecter) RemoveFolder(ctx interface{}, folderName interface{}) *MockRepositoryResources_RemoveFolder_Call { + return &MockRepositoryResources_RemoveFolder_Call{Call: _e.mock.On("RemoveFolder", ctx, folderName)} +} + +func (_c *MockRepositoryResources_RemoveFolder_Call) Run(run func(ctx context.Context, folderName string)) *MockRepositoryResources_RemoveFolder_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockRepositoryResources_RemoveFolder_Call) Return(_a0 error) *MockRepositoryResources_RemoveFolder_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockRepositoryResources_RemoveFolder_Call) RunAndReturn(run func(context.Context, string) error) *MockRepositoryResources_RemoveFolder_Call { + _c.Call.Return(run) + return _c +} + // RemoveResourceFromFile provides a mock function with given fields: ctx, path, ref -func (_m *MockRepositoryResources) RemoveResourceFromFile(ctx context.Context, path string, ref string) (string, schema.GroupVersionKind, error) { +func (_m *MockRepositoryResources) RemoveResourceFromFile(ctx context.Context, path string, ref string) (string, string, schema.GroupVersionKind, error) { ret := _m.Called(ctx, path, ref) if len(ret) == 0 { @@ -306,9 +353,10 @@ func (_m *MockRepositoryResources) RemoveResourceFromFile(ctx context.Context, p } var r0 string - var r1 schema.GroupVersionKind - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (string, schema.GroupVersionKind, error)); ok { + var r1 string + var r2 schema.GroupVersionKind + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (string, string, schema.GroupVersionKind, error)); ok { return rf(ctx, path, ref) } if rf, ok := ret.Get(0).(func(context.Context, string, string) string); ok { @@ -317,19 +365,25 @@ func (_m *MockRepositoryResources) RemoveResourceFromFile(ctx context.Context, p r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(context.Context, string, string) schema.GroupVersionKind); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, string) string); ok { r1 = rf(ctx, path, ref) } else { - r1 = ret.Get(1).(schema.GroupVersionKind) + r1 = ret.Get(1).(string) } - if rf, ok := ret.Get(2).(func(context.Context, string, string) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, string, string) 
schema.GroupVersionKind); ok { r2 = rf(ctx, path, ref) } else { - r2 = ret.Error(2) + r2 = ret.Get(2).(schema.GroupVersionKind) } - return r0, r1, r2 + if rf, ok := ret.Get(3).(func(context.Context, string, string) error); ok { + r3 = rf(ctx, path, ref) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 } // MockRepositoryResources_RemoveResourceFromFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveResourceFromFile' @@ -352,18 +406,18 @@ func (_c *MockRepositoryResources_RemoveResourceFromFile_Call) Run(run func(ctx return _c } -func (_c *MockRepositoryResources_RemoveResourceFromFile_Call) Return(_a0 string, _a1 schema.GroupVersionKind, _a2 error) *MockRepositoryResources_RemoveResourceFromFile_Call { - _c.Call.Return(_a0, _a1, _a2) +func (_c *MockRepositoryResources_RemoveResourceFromFile_Call) Return(_a0 string, _a1 string, _a2 schema.GroupVersionKind, _a3 error) *MockRepositoryResources_RemoveResourceFromFile_Call { + _c.Call.Return(_a0, _a1, _a2, _a3) return _c } -func (_c *MockRepositoryResources_RemoveResourceFromFile_Call) RunAndReturn(run func(context.Context, string, string) (string, schema.GroupVersionKind, error)) *MockRepositoryResources_RemoveResourceFromFile_Call { +func (_c *MockRepositoryResources_RemoveResourceFromFile_Call) RunAndReturn(run func(context.Context, string, string) (string, string, schema.GroupVersionKind, error)) *MockRepositoryResources_RemoveResourceFromFile_Call { _c.Call.Return(run) return _c } // RenameResourceFile provides a mock function with given fields: ctx, path, previousRef, newPath, newRef -func (_m *MockRepositoryResources) RenameResourceFile(ctx context.Context, path string, previousRef string, newPath string, newRef string) (string, schema.GroupVersionKind, error) { +func (_m *MockRepositoryResources) RenameResourceFile(ctx context.Context, path string, previousRef string, newPath string, newRef string) (string, string, schema.GroupVersionKind, error) { 
ret := _m.Called(ctx, path, previousRef, newPath, newRef) if len(ret) == 0 { @@ -371,9 +425,10 @@ func (_m *MockRepositoryResources) RenameResourceFile(ctx context.Context, path } var r0 string - var r1 schema.GroupVersionKind - var r2 error - if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (string, schema.GroupVersionKind, error)); ok { + var r1 string + var r2 schema.GroupVersionKind + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (string, string, schema.GroupVersionKind, error)); ok { return rf(ctx, path, previousRef, newPath, newRef) } if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) string); ok { @@ -382,19 +437,25 @@ func (_m *MockRepositoryResources) RenameResourceFile(ctx context.Context, path r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) schema.GroupVersionKind); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) string); ok { r1 = rf(ctx, path, previousRef, newPath, newRef) } else { - r1 = ret.Get(1).(schema.GroupVersionKind) + r1 = ret.Get(1).(string) } - if rf, ok := ret.Get(2).(func(context.Context, string, string, string, string) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, string, string, string, string) schema.GroupVersionKind); ok { r2 = rf(ctx, path, previousRef, newPath, newRef) } else { - r2 = ret.Error(2) + r2 = ret.Get(2).(schema.GroupVersionKind) } - return r0, r1, r2 + if rf, ok := ret.Get(3).(func(context.Context, string, string, string, string) error); ok { + r3 = rf(ctx, path, previousRef, newPath, newRef) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 } // MockRepositoryResources_RenameResourceFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RenameResourceFile' @@ -419,12 +480,12 @@ func (_c *MockRepositoryResources_RenameResourceFile_Call) Run(run func(ctx 
cont return _c } -func (_c *MockRepositoryResources_RenameResourceFile_Call) Return(_a0 string, _a1 schema.GroupVersionKind, _a2 error) *MockRepositoryResources_RenameResourceFile_Call { - _c.Call.Return(_a0, _a1, _a2) +func (_c *MockRepositoryResources_RenameResourceFile_Call) Return(_a0 string, _a1 string, _a2 schema.GroupVersionKind, _a3 error) *MockRepositoryResources_RenameResourceFile_Call { + _c.Call.Return(_a0, _a1, _a2, _a3) return _c } -func (_c *MockRepositoryResources_RenameResourceFile_Call) RunAndReturn(run func(context.Context, string, string, string, string) (string, schema.GroupVersionKind, error)) *MockRepositoryResources_RenameResourceFile_Call { +func (_c *MockRepositoryResources_RenameResourceFile_Call) RunAndReturn(run func(context.Context, string, string, string, string) (string, string, schema.GroupVersionKind, error)) *MockRepositoryResources_RenameResourceFile_Call { _c.Call.Return(run) return _c } diff --git a/pkg/registry/apis/provisioning/resources/resources.go b/pkg/registry/apis/provisioning/resources/resources.go index 027f04671ff..b66e16917da 100644 --- a/pkg/registry/apis/provisioning/resources/resources.go +++ b/pkg/registry/apis/provisioning/resources/resources.go @@ -238,44 +238,63 @@ func (r *ResourcesManager) WriteResourceFromFile(ctx context.Context, path strin return parsed.Obj.GetName(), parsed.GVK, err } -func (r *ResourcesManager) RenameResourceFile(ctx context.Context, previousPath, previousRef, newPath, newRef string) (string, schema.GroupVersionKind, error) { - name, gvk, err := r.RemoveResourceFromFile(ctx, previousPath, previousRef) +func (r *ResourcesManager) RenameResourceFile(ctx context.Context, previousPath, previousRef, newPath, newRef string) (string, string, schema.GroupVersionKind, error) { + name, oldFolderName, gvk, err := r.RemoveResourceFromFile(ctx, previousPath, previousRef) if err != nil { - return name, gvk, fmt.Errorf("failed to remove resource: %w", err) + return name, oldFolderName, gvk, 
fmt.Errorf("failed to remove resource: %w", err) } - return r.WriteResourceFromFile(ctx, newPath, newRef) + newName, gvk, err := r.WriteResourceFromFile(ctx, newPath, newRef) + if err != nil { + return name, oldFolderName, gvk, fmt.Errorf("failed to write resource: %w", err) + } + + return newName, oldFolderName, gvk, nil } -func (r *ResourcesManager) RemoveResourceFromFile(ctx context.Context, path string, ref string) (string, schema.GroupVersionKind, error) { +func (r *ResourcesManager) RemoveResourceFromFile(ctx context.Context, path string, ref string) (string, string, schema.GroupVersionKind, error) { info, err := r.repo.Read(ctx, path, ref) if err != nil { - return "", schema.GroupVersionKind{}, fmt.Errorf("failed to read file: %w", err) + return "", "", schema.GroupVersionKind{}, fmt.Errorf("failed to read file: %w", err) } obj, gvk, _ := DecodeYAMLObject(bytes.NewBuffer(info.Data)) if obj == nil { - return "", schema.GroupVersionKind{}, fmt.Errorf("no object found") + return "", "", schema.GroupVersionKind{}, fmt.Errorf("no object found") } objName := obj.GetName() if objName == "" { - return "", schema.GroupVersionKind{}, ErrMissingName + return "", "", schema.GroupVersionKind{}, ErrMissingName } client, _, err := r.clients.ForKind(ctx, *gvk) if err != nil { - return "", schema.GroupVersionKind{}, fmt.Errorf("unable to get client for deleted object: %w", err) + return "", "", schema.GroupVersionKind{}, fmt.Errorf("unable to get client for deleted object: %w", err) } + // the folder annotation is not stored in the git file, so we need to get it from grafana + grafanaObj, err := client.Get(ctx, objName, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return objName, "", schema.GroupVersionKind{}, nil // Already deleted or simply non-existing, nothing to do + } + return "", "", schema.GroupVersionKind{}, fmt.Errorf("unable to get grafana object: %w", err) + } + meta, err := utils.MetaAccessor(grafanaObj) + if err != nil { + return 
"", "", schema.GroupVersionKind{}, fmt.Errorf("unable to get meta accessor: %w", err) + } + folderName := meta.GetFolder() + err = client.Delete(ctx, objName, metav1.DeleteOptions{}) if err != nil { if apierrors.IsNotFound(err) { - return objName, schema.GroupVersionKind{}, nil // Already deleted or simply non-existing, nothing to do + return objName, folderName, schema.GroupVersionKind{}, nil // Already deleted or simply non-existing, nothing to do } - return "", schema.GroupVersionKind{}, fmt.Errorf("failed to delete: %w", err) + return "", "", schema.GroupVersionKind{}, fmt.Errorf("failed to delete: %w", err) } - return objName, schema.GroupVersionKind{}, nil + return objName, folderName, schema.GroupVersionKind{}, nil } diff --git a/pkg/registry/apis/provisioning/test.go b/pkg/registry/apis/provisioning/test.go index b664e9e9039..43e28bdb334 100644 --- a/pkg/registry/apis/provisioning/test.go +++ b/pkg/registry/apis/provisioning/test.go @@ -31,7 +31,6 @@ type HealthCheckerProvider interface { type ConnectorDependencies interface { RepoGetter HealthCheckerProvider - repository.RepositoryValidator GetRepoFactory() repository.Factory } @@ -39,15 +38,15 @@ type testConnector struct { getter RepoGetter factory repository.Factory healthProvider HealthCheckerProvider - validator repository.RepositoryValidator + tester repository.RepositoryTesterWithExistingChecker } -func NewTestConnector(deps ConnectorDependencies) *testConnector { +func NewTestConnector(deps ConnectorDependencies, tester repository.RepositoryTesterWithExistingChecker) *testConnector { return &testConnector{ factory: deps.GetRepoFactory(), getter: deps, healthProvider: deps, - validator: deps, + tester: tester, } } @@ -186,7 +185,7 @@ func (s *testConnector) Connect(ctx context.Context, name string, opts runtime.O } } else { // Testing temporary repository - just run test without status update - rsp, err = repository.TestRepositoryWithValidator(ctx, repo, s.validator) + rsp, err = 
s.tester.TestRepositoryAndCheckExisting(ctx, repo) if err != nil { responder.Error(err) return diff --git a/pkg/registry/apis/secret/contracts/decrypt.go b/pkg/registry/apis/secret/contracts/decrypt.go index a90d4047015..2f8cc669b34 100644 --- a/pkg/registry/apis/secret/contracts/decrypt.go +++ b/pkg/registry/apis/secret/contracts/decrypt.go @@ -26,5 +26,5 @@ type DecryptStorage interface { // DecryptAuthorizer is the interface for authorizing decryption requests. type DecryptAuthorizer interface { - Authorize(ctx context.Context, namespace xkube.Namespace, secureValueName string, secureValueDecrypters []string, owner []metav1.OwnerReference) (identity string, allowed bool) + Authorize(ctx context.Context, namespace xkube.Namespace, secureValueName string, secureValueDecrypters []string, owner []metav1.OwnerReference) (identity string, allowed bool, reason string) } diff --git a/pkg/registry/apis/secret/decrypt/authorizer.go b/pkg/registry/apis/secret/decrypt/authorizer.go index d307c005af5..3974c48f745 100644 --- a/pkg/registry/apis/secret/decrypt/authorizer.go +++ b/pkg/registry/apis/secret/decrypt/authorizer.go @@ -37,7 +37,13 @@ func ProvideDecryptAuthorizer( } // Authorize checks whether the auth info token has the right permissions to decrypt the secure value. 
-func (a *decryptAuthorizer) Authorize(ctx context.Context, ns xkube.Namespace, secureValueName string, secureValueDecrypters []string, owners []metav1.OwnerReference) (id string, isAllowed bool) { +func (a *decryptAuthorizer) Authorize( + ctx context.Context, + ns xkube.Namespace, + secureValueName string, + secureValueDecrypters []string, + owners []metav1.OwnerReference, +) (id string, isAllowed bool, reason string) { ctx, span := a.tracer.Start(ctx, "DecryptAuthorizer.Authorize", trace.WithAttributes( attribute.String("name", secureValueName), attribute.StringSlice("decrypters", secureValueDecrypters), @@ -53,37 +59,37 @@ func (a *decryptAuthorizer) Authorize(ctx context.Context, ns xkube.Namespace, s authInfo, ok := claims.AuthInfoFrom(ctx) if !ok { - return "", false + return "", false, "no auth info in context" } if !claims.NamespaceMatches(authInfo.GetNamespace(), ns.String()) { - return "", false + return "", false, "namespace in token does not match the passed namespace" } serviceIdentityList, ok := authInfo.GetExtra()[authn.ServiceIdentityKey] if !ok { - return "", false + return "", false, "no service identity in token" } // If there's more than one service identity, something is suspicious and we reject it. if len(serviceIdentityList) != 1 { - return "", false + return "", false, "more than one service identity in token" } serviceIdentity := strings.TrimSpace(serviceIdentityList[0]) if len(serviceIdentity) == 0 { - return "", false + return "", false, "empty service identity in token" } // Checks whether the token has the permission to decrypt secure values. if !hasPermissionInToken(authInfo.GetTokenPermissions(), secureValueName) { - return serviceIdentity, false + return serviceIdentity, false, "token does not have permission to decrypt secure values" } // Check whether the service identity is allowed to decrypt this secure value. 
for _, decrypter := range secureValueDecrypters { if decrypter == serviceIdentity { - return serviceIdentity, true + return serviceIdentity, true, "" } } @@ -93,14 +99,14 @@ func (a *decryptAuthorizer) Authorize(ctx context.Context, ns xkube.Namespace, s if extra.Identity == serviceIdentity { for _, owner := range owners { if strings.HasPrefix(owner.APIVersion, extra.Group) { - return serviceIdentity, true + return serviceIdentity, true, "" } } } } } - return serviceIdentity, false + return serviceIdentity, false, "service identity is not in the secure value decrypters" } // Adapted from https://github.com/grafana/authlib/blob/1492b99410603ca15730a1805a9220ce48232bc3/authz/client.go#L138 diff --git a/pkg/registry/apis/secret/decrypt/authorizer_test.go b/pkg/registry/apis/secret/decrypt/authorizer_test.go index fd52958e100..89074572587 100644 --- a/pkg/registry/apis/secret/decrypt/authorizer_test.go +++ b/pkg/registry/apis/secret/decrypt/authorizer_test.go @@ -23,7 +23,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := context.Background() authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.Empty(t, identity) require.False(t, allowed) }) @@ -32,7 +32,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -41,7 +41,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "", []string{}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, 
nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.Empty(t, identity) require.False(t, allowed) }) @@ -50,7 +50,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), " ", []string{}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.Empty(t, identity) require.False(t, allowed) }) @@ -60,13 +60,13 @@ func TestDecryptAuthorizer(t *testing.T) { // nameless ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues"}) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) // named ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues/name"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -76,13 +76,13 @@ func TestDecryptAuthorizer(t *testing.T) { // nameless ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues:*"}) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) // named ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues/name:something"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "", nil, nil) 
require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -91,7 +91,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -100,7 +100,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"wrong.group/securevalues/invalid:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -110,13 +110,13 @@ func TestDecryptAuthorizer(t *testing.T) { // nameless ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/invalid-resource:decrypt"}) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) // named ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/invalid-resource/name:decrypt"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "", nil, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "", nil, nil) require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -125,7 +125,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, 
allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) require.NotEmpty(t, identity) require.True(t, allowed) }) @@ -135,13 +135,13 @@ func TestDecryptAuthorizer(t *testing.T) { // nameless ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues:decrypt"}) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"group2"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"group2"}, nil) require.NotEmpty(t, identity) require.False(t, allowed) // named ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues/name:decrypt"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "", []string{"group2"}, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "", []string{"group2"}, nil) require.NotEmpty(t, identity) require.False(t, allowed) }) @@ -151,13 +151,13 @@ func TestDecryptAuthorizer(t *testing.T) { // nameless ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues:decrypt"}) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) require.True(t, allowed) require.Equal(t, "identity", identity) // named ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues/name:decrypt"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "name", []string{"identity"}, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "name", []string{"identity"}, nil) require.True(t, allowed) require.Equal(t, "identity", identity) }) @@ -172,11 +172,11 @@ func TestDecryptAuthorizer(t *testing.T) { }) authorizer 
:= ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "name1", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "name1", []string{"identity"}, nil) require.True(t, allowed) require.Equal(t, "identity", identity) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "name2", []string{"identity"}, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "name2", []string{"identity"}, nil) require.True(t, allowed) require.Equal(t, "identity", identity) }) @@ -185,7 +185,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues/name:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) require.Equal(t, "identity", identity) require.False(t, allowed) }) @@ -194,7 +194,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues/:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) require.Equal(t, "identity", identity) require.False(t, allowed) }) @@ -203,7 +203,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "name", []string{}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "name", []string{}, nil) 
require.Equal(t, "identity", identity) require.False(t, allowed) }) @@ -212,7 +212,7 @@ func TestDecryptAuthorizer(t *testing.T) { ctx := createAuthContext(context.Background(), defaultNs.String(), "identity1", []string{"secret.grafana.app/securevalues:decrypt"}) authorizer := ProvideDecryptAuthorizer(tracer, nil) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"identity1", "identity2", "identity3"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"identity1", "identity2", "identity3"}, nil) require.Equal(t, "identity1", identity) require.True(t, allowed) }) @@ -226,7 +226,7 @@ func TestDecryptAuthorizer(t *testing.T) { }, }) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{}, []metav1.OwnerReference{ + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{}, []metav1.OwnerReference{ { APIVersion: "test.grafana.app/v1", Kind: "Test", @@ -246,7 +246,7 @@ func TestDecryptAuthorizer(t *testing.T) { }, }) - _, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{}, []metav1.OwnerReference{ + _, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{}, []metav1.OwnerReference{ { APIVersion: "test.grafana.app/v1", Kind: "Test", @@ -265,7 +265,7 @@ func TestDecryptAuthorizer(t *testing.T) { }, }) - _, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{}, []metav1.OwnerReference{ + _, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{}, []metav1.OwnerReference{ { APIVersion: "test.grafana.app/v1", Kind: "Test", @@ -278,17 +278,17 @@ func TestDecryptAuthorizer(t *testing.T) { authorizer := ProvideDecryptAuthorizer(tracer, nil) ctx := createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"SECRET.grafana.app/securevalues:decrypt"}) - identity, allowed := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, 
nil) require.Equal(t, "identity", identity) require.False(t, allowed) ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/SECUREVALUES:decrypt"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) require.Equal(t, "identity", identity) require.False(t, allowed) ctx = createAuthContext(context.Background(), defaultNs.String(), "identity", []string{"secret.grafana.app/securevalues:DECRYPT"}) - identity, allowed = authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) + identity, allowed, _ = authorizer.Authorize(ctx, defaultNs, "", []string{"identity"}, nil) require.Equal(t, "identity", identity) require.False(t, allowed) }) @@ -297,7 +297,7 @@ func TestDecryptAuthorizer(t *testing.T) { authorizer := ProvideDecryptAuthorizer(tracer, nil) ctx := createAuthContext(context.Background(), "namespace1", "identity", []string{"secret.grafana.app/securevalues:decrypt"}) - identity, allowed := authorizer.Authorize(ctx, "namespace2", "", []string{"identity"}, nil) + identity, allowed, _ := authorizer.Authorize(ctx, "namespace2", "", []string{"identity"}, nil) require.Empty(t, identity) require.False(t, allowed) }) diff --git a/pkg/registry/apis/secret/decrypt/noop_authorizer.go b/pkg/registry/apis/secret/decrypt/noop_authorizer.go index 2ab299e32c6..6c9c6f263df 100644 --- a/pkg/registry/apis/secret/decrypt/noop_authorizer.go +++ b/pkg/registry/apis/secret/decrypt/noop_authorizer.go @@ -14,6 +14,6 @@ type NoopAlwaysAllowedAuthorizer struct{} var _ contracts.DecryptAuthorizer = &NoopAlwaysAllowedAuthorizer{} -func (a *NoopAlwaysAllowedAuthorizer) Authorize(context.Context, xkube.Namespace, string, []string, []metav1.OwnerReference) (string, bool) { - return "", true +func (a *NoopAlwaysAllowedAuthorizer) Authorize(context.Context, xkube.Namespace, string, []string, 
[]metav1.OwnerReference) (string, bool, string) { + return "", true, "" } diff --git a/pkg/registry/apis/secret/inline/inline_secure_value.go b/pkg/registry/apis/secret/inline/inline_secure_value.go index 8ad810532c4..f6805938554 100644 --- a/pkg/registry/apis/secret/inline/inline_secure_value.go +++ b/pkg/registry/apis/secret/inline/inline_secure_value.go @@ -142,7 +142,7 @@ func (s *LocalInlineSecureValueService) canIdentityReadSecureValue(ctx context.C Resource: secretv1beta1.SecureValuesResourceInfo.GroupResource().Resource, Namespace: namespace.String(), Name: name, - }) + }, "") if err != nil { return fmt.Errorf("checking access for secure value %s: %w", name, err) } diff --git a/pkg/registry/backgroundsvcs/adapter/dependencies.go b/pkg/registry/backgroundsvcs/adapter/dependencies.go index 20e31754490..6256ebeda8a 100644 --- a/pkg/registry/backgroundsvcs/adapter/dependencies.go +++ b/pkg/registry/backgroundsvcs/adapter/dependencies.go @@ -3,9 +3,21 @@ package adapter import ( "github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/modules" + "github.com/grafana/grafana/pkg/services/pluginsintegration/plugininstaller" + "github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore" + "github.com/grafana/grafana/pkg/services/provisioning" ) const ( + // PluginStore is the module name for the plugin store service. + PluginStore = pluginstore.ServiceName + + // PluginInstaller is the module name for the plugin installer service. + PluginInstaller = plugininstaller.ServiceName + + // Provisioning is the module name for the provisioning service. + Provisioning = provisioning.ServiceName + // Tracing is the module name for the tracing service. 
Tracing = tracing.ServiceName @@ -29,7 +41,10 @@ func dependencyMap() map[string][]string { return map[string][]string{ Tracing: {}, GrafanaAPIServer: {Tracing}, - Core: {GrafanaAPIServer}, + PluginStore: {GrafanaAPIServer}, + PluginInstaller: {PluginStore}, + Provisioning: {PluginStore, PluginInstaller}, + Core: {GrafanaAPIServer, PluginStore, PluginInstaller, Provisioning}, BackgroundServices: {Core}, } } diff --git a/pkg/registry/backgroundsvcs/adapter/manager.go b/pkg/registry/backgroundsvcs/adapter/manager.go index edd84418bd8..8db7d9e9c2c 100644 --- a/pkg/registry/backgroundsvcs/adapter/manager.go +++ b/pkg/registry/backgroundsvcs/adapter/manager.go @@ -14,7 +14,7 @@ import ( ) var ( - stopTimeout = 30 * time.Second + stopTimeout = 5 * time.Second ) type ManagerAdapter struct { @@ -63,6 +63,7 @@ func (m *ManagerAdapter) starting(ctx context.Context) error { // skip disabled services if s, ok := bgSvc.(registry.CanBeDisabled); ok && s.IsDisabled() { logger.Debug("Skipping disabled service", "service", namedService.ServiceName()) + manager.RegisterInvisibleModule(namedService.ServiceName(), nil) continue } diff --git a/pkg/registry/backgroundsvcs/adapter/service.go b/pkg/registry/backgroundsvcs/adapter/service.go index 037d7f392b2..50bf24b4a2d 100644 --- a/pkg/registry/backgroundsvcs/adapter/service.go +++ b/pkg/registry/backgroundsvcs/adapter/service.go @@ -18,9 +18,10 @@ var _ services.NamedService = &serviceAdapter{} // The adapter uses dskit's BasicService with a custom RunningFn: // - Starting phase: No-op, transitions immediately to Running // - Running phase: Delegates to the wrapped service's Run method -// - Stopping phase: No-op, transitions immediately to Terminated/Failed +// - Stopping phase: Closes the stop channel to signal the service to stop type serviceAdapter struct { - *services.BasicService + services.NamedService + stopCh chan struct{} name string service registry.BackgroundService } @@ -36,8 +37,9 @@ func asNamedService(service 
registry.BackgroundService) *serviceAdapter { a := &serviceAdapter{ name: name, service: service, + stopCh: make(chan struct{}), } - a.BasicService = services.NewBasicService(nil, a.run, nil).WithName(name) + a.NamedService = services.NewBasicService(nil, a.running, a.stopping).WithName(name) return a } @@ -46,13 +48,24 @@ func asNamedService(service registry.BackgroundService) *serviceAdapter { // background service's Run method. If the background service completes without // error, the adapter waits for context cancellation (service stop) before // transitioning to Stopping state, ensuring proper dskit service lifecycle. -func (a *serviceAdapter) run(ctx context.Context) error { - err := a.service.Run(ctx) +func (a *serviceAdapter) running(ctx context.Context) error { + serviceCtx, serviceCancel := context.WithCancel(ctx) + go func() { + <-a.stopCh + serviceCancel() + }() + + err := a.service.Run(serviceCtx) if err != nil && !errors.Is(err, context.Canceled) { return err } // wait for context cancellation to transition to Stopping state. // this prevents the service from causing it's dependents to stop prematurely. 
- <-ctx.Done() + <-serviceCtx.Done() + return nil +} + +func (a *serviceAdapter) stopping(_ error) error { + close(a.stopCh) return nil } diff --git a/pkg/registry/backgroundsvcs/adapter/service_test.go b/pkg/registry/backgroundsvcs/adapter/service_test.go index 368c596d395..ed357d66443 100644 --- a/pkg/registry/backgroundsvcs/adapter/service_test.go +++ b/pkg/registry/backgroundsvcs/adapter/service_test.go @@ -16,7 +16,7 @@ func TestAsNamedService(t *testing.T) { adapter := asNamedService(mockSvc) require.NotNil(t, adapter) - require.NotNil(t, adapter.BasicService) + require.NotNil(t, adapter.NamedService) require.Equal(t, mockSvc, adapter.service) expectedName := reflect.TypeOf(mockSvc).String() diff --git a/pkg/server/module_registerer.go b/pkg/server/module_registerer.go new file mode 100644 index 00000000000..8a459588937 --- /dev/null +++ b/pkg/server/module_registerer.go @@ -0,0 +1,20 @@ +package server + +import ( + "github.com/grafana/grafana/pkg/modules" +) + +// ModuleRegisterer is used to inject enterprise dskit modules into +// the module manager. This abstraction allows other builds (e.g. enterprise) to register +// additional modules while keeping the core server decoupled from build-specific dependencies. 
+type ModuleRegisterer interface { + RegisterModules(manager modules.Registry) +} + +type noopModuleRegisterer struct{} + +func (noopModuleRegisterer) RegisterModules(manager modules.Registry) {} + +func ProvideNoopModuleRegisterer() ModuleRegisterer { + return &noopModuleRegisterer{} +} diff --git a/pkg/server/module_server.go b/pkg/server/module_server.go index 0b477c3aa97..5c420b9d219 100644 --- a/pkg/server/module_server.go +++ b/pkg/server/module_server.go @@ -44,8 +44,9 @@ func NewModule(opts Options, promGatherer prometheus.Gatherer, tracer tracing.Tracer, // Ensures tracing is initialized license licensing.Licensing, + moduleRegisterer ModuleRegisterer, ) (*ModuleServer, error) { - s, err := newModuleServer(opts, apiOpts, features, cfg, storageMetrics, indexMetrics, reg, promGatherer, license) + s, err := newModuleServer(opts, apiOpts, features, cfg, storageMetrics, indexMetrics, reg, promGatherer, license, moduleRegisterer) if err != nil { return nil, err } @@ -66,6 +67,7 @@ func newModuleServer(opts Options, reg prometheus.Registerer, promGatherer prometheus.Gatherer, license licensing.Licensing, + moduleRegisterer ModuleRegisterer, ) (*ModuleServer, error) { rootCtx, shutdownFn := context.WithCancel(context.Background()) @@ -87,6 +89,7 @@ func newModuleServer(opts Options, promGatherer: promGatherer, registerer: reg, license: license, + moduleRegisterer: moduleRegisterer, } return s, nil @@ -124,6 +127,9 @@ type ModuleServer struct { httpServerRouter *mux.Router searchServerRing *ring.Ring searchServerRingClientPool *ringclient.Pool + + // moduleRegisterer allows registration of modules provided by other builds (e.g. enterprise). + moduleRegisterer ModuleRegisterer } // init initializes the server and its services. 
@@ -187,7 +193,7 @@ func (s *ModuleServer) Run() error { if err != nil { return nil, err } - return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig) + return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig, s.httpServerRouter) }) m.RegisterModule(modules.ZanzanaServer, func() (services.Service, error) { @@ -202,6 +208,9 @@ func (s *ModuleServer) Run() error { m.RegisterModule(modules.All, nil) + // Register modules provided by other builds (e.g. enterprise). + s.moduleRegisterer.RegisterModules(m) + return m.Run(s.context) } diff --git a/pkg/server/search_server_distributor_test.go b/pkg/server/search_server_distributor_test.go index 18b5431ed89..e7304876688 100644 --- a/pkg/server/search_server_distributor_test.go +++ b/pkg/server/search_server_distributor_test.go @@ -326,7 +326,7 @@ func initModuleServerForTest( ) testModuleServer { tracer := tracing.InitializeTracerForTest() - ms, err := NewModule(opts, apiOpts, featuremgmt.WithFeatures(featuremgmt.FlagUnifiedStorageSearch), cfg, nil, nil, prometheus.NewRegistry(), prometheus.DefaultGatherer, tracer, nil) + ms, err := NewModule(opts, apiOpts, featuremgmt.WithFeatures(featuremgmt.FlagUnifiedStorageSearch), cfg, nil, nil, prometheus.NewRegistry(), prometheus.DefaultGatherer, tracer, nil, ProvideNoopModuleRegisterer()) require.NoError(t, err) conn, err := grpc.NewClient(cfg.GRPCServer.Address, diff --git a/pkg/server/wire_gen.go b/pkg/server/wire_gen.go index 49a0f3c48a0..68c3743cc8f 100644 --- a/pkg/server/wire_gen.go +++ b/pkg/server/wire_gen.go @@ -548,10 +548,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api } errorRegistry := pluginerrs.ProvideErrorTracker() loaderLoader := loader.ProvideService(pluginManagementCfg, discovery, 
bootstrap, validate, initialize, terminate, errorRegistry) - pluginstoreService, err := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) - if err != nil { - return nil, err - } + pluginstoreService := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) filestoreService := filestore.ProvideService(inMemory) fileStoreManager := dashboards.ProvideFileStoreManager(pluginstoreService, filestoreService) folderPermissionsService, err := ossaccesscontrol.ProvideFolderPermissions(cfg, featureToggles, routeRegisterImpl, sqlStore, accessControl, ossLicensingService, folderimplService, acimplService, teamService, userService, actionSetService) @@ -706,7 +703,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api searchHTTPService := searchV2.ProvideSearchHTTPService(searchService) statsService := statsimpl.ProvideService(cfg, sqlStore, dashboardService, folderimplService, orgService, resourceClient, featureToggles) gatherer := metrics.ProvideGatherer() - apiAPI := api3.ProvideApi(starService, dashboardService) + apiAPI := api3.ProvideApi(cfg, featureToggles, starService, eventualRestConfigProvider) anonUserLimitValidatorImpl := validator2.ProvideAnonUserLimitValidator() anonDeviceService := anonimpl.ProvideAnonymousDeviceService(usageStats, authnService, sqlStore, cfg, orgService, serverLockService, accessControl, routeRegisterImpl, anonUserLimitValidatorImpl) signingkeysimplService, err := signingkeysimpl.ProvideEmbeddedSigningKeysService(sqlStore, secretsService, remoteCache, routeRegisterImpl) @@ -815,7 +812,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api apiService := api4.ProvideService(cfg, routeRegisterImpl, accessControl, userService, authinfoimplService, ossGroups, identitySynchronizer, orgService, ldapImpl, userAuthTokenService, bundleregistryService) dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, 
dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService) snapshotsAPIBuilder := dashboardsnapshot.RegisterAPIService(serviceImpl, apiserverService, cfg, featureToggles, sqlStore, registerer) - dataSourceAPIBuilder, err := datasource.RegisterAPIService(featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, accessControl, registerer) + dataSourceAPIBuilder, err := datasource.RegisterAPIService(configProvider, featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, accessControl, registerer) if err != nil { return nil, err } @@ -1157,10 +1154,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac } errorRegistry := pluginerrs.ProvideErrorTracker() loaderLoader := loader.ProvideService(pluginManagementCfg, discovery, bootstrap, validate, initialize, terminate, errorRegistry) - pluginstoreService, err := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) - if err != nil { - return nil, err - } + pluginstoreService := pluginstore.ProvideService(inMemory, sourcesService, loaderLoader) filestoreService := filestore.ProvideService(inMemory) fileStoreManager := dashboards.ProvideFileStoreManager(pluginstoreService, filestoreService) folderPermissionsService, err := ossaccesscontrol.ProvideFolderPermissions(cfg, featureToggles, routeRegisterImpl, sqlStore, accessControl, ossLicensingService, folderimplService, acimplService, teamService, userService, actionSetService) @@ -1317,7 +1311,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac searchHTTPService := searchV2.ProvideSearchHTTPService(searchService) 
statsService := statsimpl.ProvideService(cfg, sqlStore, dashboardService, folderimplService, orgService, resourceClient, featureToggles) gatherer := metrics.ProvideGathererForTest(registerer) - apiAPI := api3.ProvideApi(starService, dashboardService) + apiAPI := api3.ProvideApi(cfg, featureToggles, starService, eventualRestConfigProvider) anonUserLimitValidatorImpl := validator2.ProvideAnonUserLimitValidator() anonDeviceService := anonimpl.ProvideAnonymousDeviceService(usageStats, authnService, sqlStore, cfg, orgService, serverLockService, accessControl, routeRegisterImpl, anonUserLimitValidatorImpl) signingkeysimplService, err := signingkeysimpl.ProvideEmbeddedSigningKeysService(sqlStore, secretsService, remoteCache, routeRegisterImpl) @@ -1426,7 +1420,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac apiService := api4.ProvideService(cfg, routeRegisterImpl, accessControl, userService, authinfoimplService, ossGroups, identitySynchronizer, orgService, ldapImpl, userAuthTokenService, bundleregistryService) dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService) snapshotsAPIBuilder := dashboardsnapshot.RegisterAPIService(serviceImpl, apiserverService, cfg, featureToggles, sqlStore, registerer) - dataSourceAPIBuilder, err := datasource.RegisterAPIService(featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, accessControl, registerer) + dataSourceAPIBuilder, err := datasource.RegisterAPIService(configProvider, featureToggles, apiserverService, middlewareHandler, 
scopedPluginDatasourceProvider, plugincontextProvider, accessControl, registerer) if err != nil { return nil, err } @@ -1636,7 +1630,8 @@ func InitializeModuleServer(cfg *setting.Cfg, opts Options, apiOpts api.ServerOp } hooksService := hooks.ProvideService() ossLicensingService := licensing.ProvideService(cfg, hooksService) - moduleServer, err := NewModule(opts, apiOpts, featureToggles, cfg, storageMetrics, bleveIndexMetrics, registerer, gatherer, tracingService, ossLicensingService) + moduleRegisterer := ProvideNoopModuleRegisterer() + moduleServer, err := NewModule(opts, apiOpts, featureToggles, cfg, storageMetrics, bleveIndexMetrics, registerer, gatherer, tracingService, ossLicensingService, moduleRegisterer) if err != nil { return nil, err } diff --git a/pkg/server/wireexts_oss.go b/pkg/server/wireexts_oss.go index 5ba3b26347f..6b2163f4e34 100644 --- a/pkg/server/wireexts_oss.go +++ b/pkg/server/wireexts_oss.go @@ -191,6 +191,8 @@ var wireExtsModuleServerSet = wire.NewSet( // Unified storage resource.ProvideStorageMetrics, resource.ProvideIndexMetrics, + // Overriden by enterprise + ProvideNoopModuleRegisterer, ) var wireExtsStandaloneAPIServerSet = wire.NewSet( diff --git a/pkg/services/accesscontrol/authorizer.go b/pkg/services/accesscontrol/authorizer.go index a1bc224eae8..ac180fb2ac7 100644 --- a/pkg/services/accesscontrol/authorizer.go +++ b/pkg/services/accesscontrol/authorizer.go @@ -85,7 +85,7 @@ type LegacyAccessClient struct { opts map[string]ResourceAuthorizerOptions } -func (c *LegacyAccessClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest) (claims.CheckResponse, error) { +func (c *LegacyAccessClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest, folder string) (claims.CheckResponse, error) { ident, ok := id.(identity.Requester) if !ok { return claims.CheckResponse{}, errors.New("expected identity.Requester for legacy access control") @@ -140,6 +140,9 @@ func (c *LegacyAccessClient) Check(ctx 
context.Context, id claims.AuthInfo, req return claims.CheckResponse{}, err } + // NOTE: folder is looked up again in the evaluator: + // pkg/services/accesscontrol/acimpl/accesscontrol.go#L77 + return claims.CheckResponse{Allowed: allowed}, nil } diff --git a/pkg/services/accesscontrol/authorizer_test.go b/pkg/services/accesscontrol/authorizer_test.go index 515186f2a57..04853bdd7ee 100644 --- a/pkg/services/accesscontrol/authorizer_test.go +++ b/pkg/services/accesscontrol/authorizer_test.go @@ -24,7 +24,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Resource: "dashboards", Namespace: "default", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, false, res.Allowed) }) @@ -47,7 +47,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, false, res.Allowed) @@ -70,7 +70,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Verb: "list", Namespace: "default", Resource: "dashboards", - }) + }, "") assert.NoError(t, err) assert.Equal(t, true, res.Allowed) @@ -94,7 +94,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, true, res.Allowed) @@ -119,7 +119,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, true, res.Allowed) @@ -129,7 +129,7 @@ func TestLegacyAccessClient_Check(t *testing.T) { Namespace: "default", Resource: "dashboards", Name: "1", - }) + }, "") assert.NoError(t, err) assert.Equal(t, false, res.Allowed) diff --git a/pkg/services/apiserver/auth/authorizer/resource.go b/pkg/services/apiserver/auth/authorizer/resource.go index 96b8e8dd4cb..b86f6b40f09 100644 --- a/pkg/services/apiserver/auth/authorizer/resource.go +++ b/pkg/services/apiserver/auth/authorizer/resource.go @@ -36,7 +36,7 @@ func (r 
ResourceAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri Name: attr.GetName(), Subresource: attr.GetSubresource(), Path: attr.GetPath(), - }) + }, "") // NOTE: we do not know the folder in this context if err != nil { return authorizer.DecisionDeny, "", err diff --git a/pkg/services/apiserver/restconfig.go b/pkg/services/apiserver/restconfig.go index ee822e1917e..e3fdbfe7dd9 100644 --- a/pkg/services/apiserver/restconfig.go +++ b/pkg/services/apiserver/restconfig.go @@ -5,8 +5,9 @@ import ( "errors" "net/http" - contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" clientrest "k8s.io/client-go/rest" + + contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" ) type RestConfigProvider interface { diff --git a/pkg/services/apiserver/service.go b/pkg/services/apiserver/service.go index 69381a2bc99..86706dcbb48 100644 --- a/pkg/services/apiserver/service.go +++ b/pkg/services/apiserver/service.go @@ -85,7 +85,6 @@ type service struct { features featuremgmt.FeatureToggles log log.Logger - stopCh chan struct{} stoppedCh chan error db db.DB @@ -148,7 +147,6 @@ func ProvideService( cfg: cfg, features: features, rr: rr, - stopCh: make(chan struct{}), builders: []builder.APIGroupBuilder{}, authorizer: authorizer.NewGrafanaBuiltInSTAuthorizer(cfg), tracing: tracing, @@ -242,11 +240,8 @@ func (s *service) Run(ctx context.Context) error { if err := s.StartAsync(ctx); err != nil { return err } - - if err := s.AwaitRunning(ctx); err != nil { - return err - } - return s.AwaitTerminated(ctx) + stopCtx := context.Background() + return s.AwaitTerminated(stopCtx) } func (s *service) RegisterAPI(b builder.APIGroupBuilder) { diff --git a/pkg/services/authz/rbac/service.go b/pkg/services/authz/rbac/service.go index 0650fd7daf5..105f7db7157 100644 --- a/pkg/services/authz/rbac/service.go +++ b/pkg/services/authz/rbac/service.go @@ -232,12 +232,18 @@ func (s *Service) List(ctx context.Context, req *authzv1.ListRequest) (*authzv1. 
} resp, err := s.listPermission(ctx, permissions, listReq) - if cacheHit && time.Duration(time.Now().Unix()-resp.Zookie.Timestamp) < s.settings.CacheTTL { - resp.Zookie = &authzv1.Zookie{Timestamp: time.Now().Add(-s.settings.CacheTTL).Unix()} + s.metrics.requestCount.WithLabelValues(strconv.FormatBool(err != nil), "true", req.GetVerb(), req.GetGroup(), req.GetResource()).Inc() + if err != nil { + return nil, err } - s.metrics.requestCount.WithLabelValues(strconv.FormatBool(err != nil), "true", req.GetVerb(), req.GetGroup(), req.GetResource()).Inc() - return resp, err + if resp != nil && resp.Zookie != nil { + if cacheHit && time.Duration(time.Now().Unix()-resp.Zookie.Timestamp) < s.settings.CacheTTL { + resp.Zookie = &authzv1.Zookie{Timestamp: time.Now().Add(-s.settings.CacheTTL).Unix()} + } + } + + return resp, nil } func (s *Service) validateCheckRequest(ctx context.Context, req *authzv1.CheckRequest) (*checkRequest, error) { diff --git a/pkg/services/authz/zanzana/client/client.go b/pkg/services/authz/zanzana/client/client.go index 47b463a2e07..05266afba83 100644 --- a/pkg/services/authz/zanzana/client/client.go +++ b/pkg/services/authz/zanzana/client/client.go @@ -3,11 +3,12 @@ package client import ( "context" + "go.opentelemetry.io/otel" + "google.golang.org/grpc" + authzlib "github.com/grafana/authlib/authz" authzv1 "github.com/grafana/authlib/authz/proto/v1" authlib "github.com/grafana/authlib/types" - "go.opentelemetry.io/otel" - "google.golang.org/grpc" "github.com/grafana/grafana/pkg/infra/log" authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1" @@ -36,11 +37,11 @@ func New(cc grpc.ClientConnInterface) (*Client, error) { return c, nil } -func (c *Client) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (c *Client) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { ctx, span := tracer.Start(ctx, 
"authlib.zanzana.client.Check") defer span.End() - return c.authzlibclient.Check(ctx, id, req) + return c.authzlibclient.Check(ctx, id, req, folder) } func (c *Client) Compile(ctx context.Context, id authlib.AuthInfo, req authlib.ListRequest) (authlib.ItemChecker, authlib.Zookie, error) { diff --git a/pkg/services/authz/zanzana/client/noop.go b/pkg/services/authz/zanzana/client/noop.go index 26fbbab573b..419a9b73201 100644 --- a/pkg/services/authz/zanzana/client/noop.go +++ b/pkg/services/authz/zanzana/client/noop.go @@ -16,7 +16,7 @@ func NewNoop() *NoopClient { type NoopClient struct{} -func (nc *NoopClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (nc *NoopClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { return authlib.CheckResponse{}, nil } diff --git a/pkg/services/authz/zanzana/client/shadow_client.go b/pkg/services/authz/zanzana/client/shadow_client.go index e2ba4e01c9c..4fb4c545ff0 100644 --- a/pkg/services/authz/zanzana/client/shadow_client.go +++ b/pkg/services/authz/zanzana/client/shadow_client.go @@ -3,9 +3,9 @@ package client import ( "context" - authlib "github.com/grafana/authlib/types" "github.com/prometheus/client_golang/prometheus" + authlib "github.com/grafana/authlib/types" "github.com/grafana/grafana/pkg/infra/log" ) @@ -29,7 +29,7 @@ func WithShadowClient(accessClient authlib.AccessClient, zanzanaClient authlib.A return client } -func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { acResChan := make(chan authlib.CheckResponse, 1) acErrChan := make(chan error, 1) @@ -42,7 +42,7 @@ func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authl defer 
timer.ObserveDuration() zanzanaCtx := context.WithoutCancel(ctx) - res, err := c.zanzanaClient.Check(zanzanaCtx, id, req) + res, err := c.zanzanaClient.Check(zanzanaCtx, id, req, folder) if err != nil { c.logger.Error("Failed to run zanzana check", "error", err) } @@ -61,7 +61,7 @@ func (c *ShadowClient) Check(ctx context.Context, id authlib.AuthInfo, req authl }() timer := prometheus.NewTimer(c.metrics.evaluationsSeconds.WithLabelValues("rbac")) - res, err := c.accessClient.Check(ctx, id, req) + res, err := c.accessClient.Check(ctx, id, req, folder) timer.ObserveDuration() acResChan <- res acErrChan <- err diff --git a/pkg/services/authz/zanzana/zanzana.go b/pkg/services/authz/zanzana/zanzana.go index adf2dbab531..d7d1b1dfd45 100644 --- a/pkg/services/authz/zanzana/zanzana.go +++ b/pkg/services/authz/zanzana/zanzana.go @@ -7,7 +7,6 @@ import ( openfgav1 "github.com/openfga/api/proto/openfga/v1" authlib "github.com/grafana/authlib/types" - "github.com/grafana/grafana/pkg/services/authz/zanzana/common" ) @@ -123,7 +122,7 @@ func MergeFolderResourceTuples(a, b *openfgav1.TupleKey) { va.GetListValue().Values = append(va.GetListValue().Values, vb.GetListValue().Values...) 
} -func TranslateToCheckRequest(namespace, action, kind, folder, name string) (*authlib.CheckRequest, bool) { +func TranslateToCheckRequest(namespace, action, kind, name string) (*authlib.CheckRequest, bool) { translation, ok := resourceTranslations[kind] if !ok { @@ -146,7 +145,6 @@ func TranslateToCheckRequest(namespace, action, kind, folder, name string) (*aut Group: translation.group, Resource: translation.resource, Name: name, - Folder: folder, } return req, true diff --git a/pkg/services/cloudmigration/cloudmigrationimpl/cloudmigration_test.go b/pkg/services/cloudmigration/cloudmigrationimpl/cloudmigration_test.go index e430611f3cd..f30feca4ee2 100644 --- a/pkg/services/cloudmigration/cloudmigrationimpl/cloudmigration_test.go +++ b/pkg/services/cloudmigration/cloudmigrationimpl/cloudmigration_test.go @@ -14,12 +14,10 @@ import ( "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/grafana/grafana/pkg/api/routing" "github.com/grafana/grafana/pkg/bus" - "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/infra/httpclient" "github.com/grafana/grafana/pkg/infra/kvstore" "github.com/grafana/grafana/pkg/infra/tracing" @@ -63,7 +61,7 @@ func Test_NoopServiceDoesNothing(t *testing.T) { func Test_CreateGetAndDeleteToken(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false) + s := setUpServiceTest(t) createResp, err := s.CreateToken(context.Background()) assert.NoError(t, err) @@ -88,7 +86,7 @@ func Test_GetSnapshotStatusFromGMS(t *testing.T) { t.Parallel() setupTest := func(ctx context.Context) (service *Service, snapshotUID string, sessionUID string) { - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) gmsClientFake := &gmsClientMock{} s.gmsClient = gmsClientFake @@ -365,7 +363,7 @@ func Test_GetSnapshotStatusFromGMS(t *testing.T) { func 
Test_OnlyQueriesStatusFromGMSWhenRequired(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) gmsClientMock := &gmsClientMock{ getSnapshotResponse: &cloudmigration.GetSnapshotStatusResponse{ @@ -427,14 +425,29 @@ func Test_OnlyQueriesStatusFromGMSWhenRequired(t *testing.T) { Status: status, }) assert.NoError(t, err) - _, err := s.GetSnapshot(context.Background(), cloudmigration.GetSnapshotsQuery{ + snapshot, err := s.GetSnapshot(context.Background(), cloudmigration.GetSnapshotsQuery{ SnapshotUID: uid, SessionUID: sess.UID, }) assert.NoError(t, err) - require.Eventually(t, func() bool { return gmsClientMock.GetSnapshotStatusCallCount() == i+1 }, time.Second, 10*time.Millisecond) + assert.Equal(t, status, snapshot.Status) + + require.Eventually( + t, + func() bool { return gmsClientMock.GetSnapshotStatusCallCount() == i+1 }, + 2*time.Second, + 100*time.Millisecond, + "GMS client mock GetSnapshotStatus count: %d", gmsClientMock.GetSnapshotStatusCallCount(), + ) } - assert.Never(t, func() bool { return gmsClientMock.GetSnapshotStatusCallCount() > 2 }, time.Second, 10*time.Millisecond) + + assert.Never( + t, + func() bool { return gmsClientMock.GetSnapshotStatusCallCount() > 2 }, + 2*time.Second, + 100*time.Millisecond, + "GMS client mock GetSnapshotStatus called more than expected: %d times", gmsClientMock.GetSnapshotStatusCallCount(), + ) } // Implementation inspired by ChatGPT, OpenAI's language model. 
@@ -463,7 +476,7 @@ func Test_SortFolders(t *testing.T) { func TestDeleteSession(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{UserUID: "user123"} t.Run("when deleting a session that does not exist in the database, it returns an error", func(t *testing.T) { @@ -515,7 +528,7 @@ func TestReportEvent(t *testing.T) { gmsMock := &gmsClientMock{} - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) s.gmsClient = gmsMock require.NotPanics(t, func() { @@ -533,7 +546,7 @@ func TestReportEvent(t *testing.T) { gmsMock := &gmsClientMock{} - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) s.gmsClient = gmsMock require.NotPanics(t, func() { @@ -547,7 +560,7 @@ func TestReportEvent(t *testing.T) { func TestGetFolderNamesForFolderUIDs(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -616,7 +629,7 @@ func TestGetParentNames(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{OrgID: 1} @@ -705,7 +718,7 @@ func TestGetParentNames(t *testing.T) { func TestGetLibraryElementsCommands(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -771,7 +784,7 @@ func TestIsPublicSignatureType(t *testing.T) { func TestGetPlugins(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -869,7 +882,7 @@ func TestGetPlugins(t *testing.T) { type configOverrides func(c *setting.Cfg) -func 
setUpServiceTest(t *testing.T, withDashboardMock bool, cfgOverrides ...configOverrides) cloudmigration.Service { +func setUpServiceTest(t *testing.T, cfgOverrides ...configOverrides) cloudmigration.Service { secretsService := secretsfakes.NewFakeSecretsService() rr := routing.NewRouteRegister() tracer := tracing.InitializeTracerForTest() @@ -888,17 +901,6 @@ func setUpServiceTest(t *testing.T, withDashboardMock bool, cfgOverrides ...conf cfg.CloudMigration.SnapshotFolder = filepath.Join(os.TempDir(), uuid.NewString()) dashboardService := dashboards.NewFakeDashboardService(t) - if withDashboardMock { - dashboardService.On("GetAllDashboards", mock.Anything).Return( - []*dashboards.Dashboard{ - { - UID: "1", - Data: simplejson.New(), - }, - }, - nil, - ) - } dsService := &datafakes.FakeDataSourceService{ DataSources: []*datasources.DataSource{ diff --git a/pkg/services/cloudmigration/cloudmigrationimpl/snapshot_mgmt_alerts_test.go b/pkg/services/cloudmigration/cloudmigrationimpl/snapshot_mgmt_alerts_test.go index b382035c150..151d55562f9 100644 --- a/pkg/services/cloudmigration/cloudmigrationimpl/snapshot_mgmt_alerts_test.go +++ b/pkg/services/cloudmigration/cloudmigrationimpl/snapshot_mgmt_alerts_test.go @@ -45,7 +45,7 @@ func TestGetAlertMuteTimings(t *testing.T) { t.Run("it returns the mute timings", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) s.features = featuremgmt.WithFeatures(featuremgmt.FlagOnPremToCloudMigrations) user := &user.SignedInUser{OrgID: 1} @@ -69,7 +69,7 @@ func TestGetNotificationTemplates(t *testing.T) { t.Run("it returns the notification templates", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{OrgID: 1} @@ -92,7 +92,7 @@ func TestGetContactPoints(t *testing.T) { t.Run("it returns the contact points", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, 
false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{ OrgID: 1, @@ -115,7 +115,7 @@ func TestGetContactPoints(t *testing.T) { t.Run("it returns an error when user lacks permission to read contact point secrets", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{ OrgID: 1, @@ -144,7 +144,7 @@ func TestGetNotificationPolicies(t *testing.T) { t.Run("it returns the contact points", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{OrgID: 1} @@ -172,7 +172,7 @@ func TestGetAlertRules(t *testing.T) { t.Run("it returns the alert rules", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{OrgID: 1, Permissions: map[int64]map[string][]string{1: alertRulesPermissions}} @@ -191,7 +191,7 @@ func TestGetAlertRules(t *testing.T) { c.CloudMigration.AlertRulesState = setting.GMSAlertRulesPaused } - s := setUpServiceTest(t, false, alertRulesState).(*Service) + s := setUpServiceTest(t, alertRulesState).(*Service) user := &user.SignedInUser{OrgID: 1, Permissions: map[int64]map[string][]string{1: alertRulesPermissions}} @@ -218,7 +218,7 @@ func TestGetAlertRuleGroups(t *testing.T) { t.Run("it returns the alert rule groups", func(t *testing.T) { t.Parallel() - s := setUpServiceTest(t, false).(*Service) + s := setUpServiceTest(t).(*Service) user := &user.SignedInUser{OrgID: 1, Permissions: map[int64]map[string][]string{1: alertRulesPermissions}} @@ -257,7 +257,7 @@ func TestGetAlertRuleGroups(t *testing.T) { c.CloudMigration.AlertRulesState = setting.GMSAlertRulesPaused } - s := setUpServiceTest(t, false, alertRulesState).(*Service) + s := setUpServiceTest(t, alertRulesState).(*Service) user := &user.SignedInUser{OrgID: 1, Permissions: map[int64]map[string][]string{1: 
alertRulesPermissions}} diff --git a/pkg/services/dashboards/database/database.go b/pkg/services/dashboards/database/database.go index b6bfc346f51..e2909cbe46f 100644 --- a/pkg/services/dashboards/database/database.go +++ b/pkg/services/dashboards/database/database.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/services/dashboards" dashver "github.com/grafana/grafana/pkg/services/dashboardversion" "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/services/folder" "github.com/grafana/grafana/pkg/services/libraryelements/model" "github.com/grafana/grafana/pkg/services/quota" "github.com/grafana/grafana/pkg/services/sqlstore" @@ -96,6 +97,7 @@ func (d *dashboardStore) GetDashboardsByLibraryPanelUID(ctx context.Context, lib return connectedDashboards, err } +// nolint:gocyclo func (d *dashboardStore) ValidateDashboardBeforeSave(ctx context.Context, dash *dashboards.Dashboard, overwrite bool) (bool, error) { ctx, span := tracer.Start(ctx, "dashboards.database.ValidateDashboardBeforesave") defer span.End() @@ -107,7 +109,7 @@ func (d *dashboardStore) ValidateDashboardBeforeSave(ctx context.Context, dash * // we don't save FolderID in kubernetes object when saving through k8s // this block guarantees we save dashboards with folder_id and folder_uid in those cases - if !dash.IsFolder && dash.FolderUID != "" && dash.FolderID == 0 { // nolint:staticcheck + if !dash.IsFolder && dash.FolderUID != "" && dash.FolderID == 0 && dash.FolderUID != folder.GeneralFolderUID { // nolint:staticcheck var existing dashboards.Dashboard folderIdFound, err := sess.Where("uid=? 
AND org_id=?", dash.FolderUID, dash.OrgID).Get(&existing) if err != nil { diff --git a/pkg/apis/featuretoggle/v0alpha1/types.go b/pkg/services/featuremgmt/feature_toggle_api/types.go similarity index 96% rename from pkg/apis/featuretoggle/v0alpha1/types.go rename to pkg/services/featuremgmt/feature_toggle_api/types.go index 664ff1adec5..6b62447fa33 100644 --- a/pkg/apis/featuretoggle/v0alpha1/types.go +++ b/pkg/services/featuremgmt/feature_toggle_api/types.go @@ -1,4 +1,4 @@ -package v0alpha1 +package feature_toggle_api import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -6,6 +6,12 @@ import ( common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1" ) +const ( + GROUP = "featuretoggle.grafana.app" + VERSION = "v0alpha1" + APIVERSION = GROUP + "/" + VERSION +) + // Feature represents a feature in development and information about that feature // It does *not* know the status, only defines properties about the feature itself // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/services/featuremgmt/manager.go b/pkg/services/featuremgmt/manager.go index 457436790a5..5cfe588a518 100644 --- a/pkg/services/featuremgmt/manager.go +++ b/pkg/services/featuremgmt/manager.go @@ -6,7 +6,6 @@ import ( "reflect" "github.com/grafana/grafana/pkg/infra/log" - "github.com/grafana/grafana/pkg/setting" ) var ( @@ -14,10 +13,7 @@ var ( ) type FeatureManager struct { - isDevMod bool - restartRequired bool - - Settings setting.FeatureMgmtSettings + isDevMod bool flags map[string]*FeatureFlag enabled map[string]bool // only the "on" values @@ -131,66 +127,6 @@ func (fm *FeatureManager) GetFlags() []FeatureFlag { return v } -// isFeatureEditingAllowed checks if the backend is properly configured to allow feature toggle changes from the UI -func (fm *FeatureManager) IsFeatureEditingAllowed() bool { - return fm.Settings.AllowEditing && fm.Settings.UpdateWebhook != "" -} - -// indicate if a change has been made (not that accurate, but 
better than nothing) -func (fm *FeatureManager) IsRestartRequired() bool { - return fm.restartRequired -} - -// Flags that can be edited -func (fm *FeatureManager) IsEditableFromAdminPage(key string) bool { - flag, ok := fm.flags[key] - if !ok || - !fm.IsFeatureEditingAllowed() || - !flag.AllowSelfServe || - flag.Name == FlagFeatureToggleAdminPage { - return false - } - return flag.Stage == FeatureStageGeneralAvailability || - flag.Stage == FeatureStagePublicPreview || - flag.Stage == FeatureStageDeprecated -} - -// Flags that should not be shown in the UI (regardless of their state) -func (fm *FeatureManager) IsHiddenFromAdminPage(key string, lenient bool) bool { - _, hide := fm.Settings.HiddenToggles[key] - flag, ok := fm.flags[key] - if !ok || flag.HideFromAdminPage || hide { - return true // unknown flag (should we show it as a warning!) - } - - // Explicitly hidden from configs - _, found := fm.Settings.HiddenToggles[key] - if found { - return true - } - if lenient { - return false - } - - return flag.Stage == FeatureStageUnknown || - flag.Stage == FeatureStageExperimental || - flag.Stage == FeatureStagePrivatePreview -} - -// Get the flags that were explicitly set on startup -func (fm *FeatureManager) GetStartupFlags() map[string]bool { - return fm.startup -} - -// Perhaps expose the flag warnings -func (fm *FeatureManager) GetWarning() map[string]string { - return fm.warnings -} - -func (fm *FeatureManager) SetRestartRequired() { - fm.restartRequired = true -} - // ############# Test Functions ############# func WithFeatures(spec ...any) FeatureToggles { @@ -223,33 +159,3 @@ func WithManager(spec ...any) *FeatureManager { return &FeatureManager{enabled: enabled, flags: features, startup: enabled, warnings: map[string]string{}} } - -// WithFeatureManager is used to define feature toggle manager for testing. -// It should be used when your test feature toggles require metadata beyond `Name` and `Enabled`. 
-// You should provide a feature toggle Name at a minimum. -func WithFeatureManager(cfg setting.FeatureMgmtSettings, flags []*FeatureFlag, disabled ...string) *FeatureManager { - count := len(flags) - features := make(map[string]*FeatureFlag, count) - enabled := make(map[string]bool, count) - - dis := make(map[string]bool) - for _, v := range disabled { - dis[v] = true - } - - for _, f := range flags { - if f.Name == "" { - continue - } - features[f.Name] = f - enabled[f.Name] = !dis[f.Name] - } - - return &FeatureManager{ - Settings: cfg, - enabled: enabled, - flags: features, - startup: enabled, - warnings: map[string]string{}, - } -} diff --git a/pkg/services/featuremgmt/registry.go b/pkg/services/featuremgmt/registry.go index c13012b264e..beca14a83cc 100644 --- a/pkg/services/featuremgmt/registry.go +++ b/pkg/services/featuremgmt/registry.go @@ -11,7 +11,7 @@ import ( "embed" "encoding/json" - featuretoggle "github.com/grafana/grafana/pkg/apis/featuretoggle/v0alpha1" + featuretoggleapi "github.com/grafana/grafana/pkg/services/featuremgmt/feature_toggle_api" ) var ( @@ -180,6 +180,13 @@ var ( AllowSelfServe: false, HideFromDocs: true, }, + { + Name: "kubernetesStars", + Description: "Routes stars requests from /api to the /apis endpoint", + Stage: FeatureStageExperimental, + Owner: grafanaAppPlatformSquad, + RequiresRestart: true, // changes the API routing + }, { Name: "influxqlStreamingParser", Description: "Enable streaming JSON parser for InfluxDB datasource InfluxQL query language", @@ -332,15 +339,6 @@ var ( RequiresRestart: true, Owner: grafanaAppPlatformSquad, }, - { - Name: "featureToggleAdminPage", - Description: "Enable admin page for managing feature toggles from the Grafana front-end. 
Grafana Cloud only.", - Stage: FeatureStageExperimental, - FrontendOnly: false, - Owner: grafanaBackendServicesSquad, - RequiresRestart: true, - HideFromDocs: true, - }, { Name: "awsAsyncQueryCaching", Description: "Enable caching for async queries for Redshift and Athena. Requires that the datasource has caching and async query support enabled", @@ -784,7 +782,7 @@ var ( }, { Name: "alertingSaveStateCompressed", - Description: "Enables the compressed protobuf-based alert state storage", + Description: "Enables the compressed protobuf-based alert state storage. Default is enabled.", Stage: FeatureStagePublicPreview, FrontendOnly: false, Owner: grafanaAlertingSquad, @@ -808,6 +806,16 @@ var ( HideFromDocs: true, HideFromAdminPage: true, }, + { + Name: "useMultipleScopeNodesEndpoint", + Description: "Makes the frontend use the 'names' param for fetching multiple scope nodes at once", + Stage: FeatureStageExperimental, + Owner: grafanaOperatorExperienceSquad, + Expression: "false", + FrontendOnly: true, + HideFromDocs: true, + HideFromAdminPage: true, + }, { Name: "promQLScope", Description: "In-development feature that will allow injection of labels into prometheus queries.", @@ -1512,6 +1520,16 @@ var ( FrontendOnly: true, HideFromDocs: true, }, + { + Name: "alertingUseNewSimplifiedRoutingHashAlgorithm", + Description: "", + Stage: FeatureStagePublicPreview, + Owner: grafanaAlertingSquad, + HideFromAdminPage: true, + HideFromDocs: true, + RequiresRestart: true, + Expression: "true", + }, { Name: "useScopesNavigationEndpoint", Description: "Use the scopes navigation endpoint instead of the dashboardbindings endpoint", @@ -1659,13 +1677,6 @@ var ( HideFromDocs: true, Owner: awsDatasourcesSquad, }, - { - Name: "localizationForPlugins", - Description: "Enables localization for plugins", - Stage: FeatureStageExperimental, - Owner: grafanaPluginsPlatformSquad, - FrontendOnly: false, - }, { Name: "unifiedNavbars", Description: "Enables unified navbars", @@ -2066,6 +2077,14 
@@ var ( Owner: grafanaPluginsPlatformSquad, Expression: "false", }, + { + Name: "cdnPluginsUrls", + Description: "Enable loading plugins via declarative URLs", + Stage: FeatureStageExperimental, + FrontendOnly: false, + Owner: grafanaPluginsPlatformSquad, + Expression: "false", + }, } ) @@ -2073,8 +2092,8 @@ var ( var f embed.FS // Get the cached feature list (exposed as a k8s resource) -func GetEmbeddedFeatureList() (featuretoggle.FeatureList, error) { - features := featuretoggle.FeatureList{} +func GetEmbeddedFeatureList() (featuretoggleapi.FeatureList, error) { + features := featuretoggleapi.FeatureList{} body, err := f.ReadFile("toggles_gen.json") if err == nil { err = json.Unmarshal(body, &features) diff --git a/pkg/services/featuremgmt/service.go b/pkg/services/featuremgmt/service.go index d1bc74b17cd..2769a75d788 100644 --- a/pkg/services/featuremgmt/service.go +++ b/pkg/services/featuremgmt/service.go @@ -27,7 +27,6 @@ func ProvideManagerService(cfg *setting.Cfg) (*FeatureManager, error) { enabled: make(map[string]bool), startup: make(map[string]bool), warnings: make(map[string]string), - Settings: cfg.FeatureManagement, log: log.New("featuremgmt"), } diff --git a/pkg/services/featuremgmt/toggles_gen.csv b/pkg/services/featuremgmt/toggles_gen.csv index 243f0e7b37a..ea46bf6e069 100644 --- a/pkg/services/featuremgmt/toggles_gen.csv +++ b/pkg/services/featuremgmt/toggles_gen.csv @@ -21,6 +21,7 @@ lokiQuerySplitting,GA,@grafana/observability-logs,false,false,true individualCookiePreferences,experimental,@grafana/grafana-backend-group,false,false,false influxdbBackendMigration,GA,@grafana/partner-datasources,false,false,true starsFromAPIServer,experimental,@grafana/grafana-frontend-platform,false,false,true +kubernetesStars,experimental,@grafana/grafana-app-platform-squad,false,true,false influxqlStreamingParser,experimental,@grafana/partner-datasources,false,false,false influxdbRunQueriesInParallel,privatePreview,@grafana/partner-datasources,false,false,false 
lokiLogsDataplane,experimental,@grafana/observability-logs,false,false,false @@ -42,7 +43,6 @@ datasourceAPIServers,experimental,@grafana/grafana-app-platform-squad,false,true grafanaAPIServerWithExperimentalAPIs,experimental,@grafana/grafana-app-platform-squad,true,true,false provisioning,experimental,@grafana/grafana-app-platform-squad,false,true,false grafanaAPIServerEnsureKubectlAccess,experimental,@grafana/grafana-app-platform-squad,true,true,false -featureToggleAdminPage,experimental,@grafana/grafana-backend-services-squad,false,true,false awsAsyncQueryCaching,GA,@grafana/aws-datasources,false,false,false queryCacheRequestDeduplication,experimental,@grafana/grafana-operator-experience-squad,false,false,false permissionsFilterRemoveSubquery,experimental,@grafana/search-and-storage,false,false,false @@ -106,6 +106,7 @@ alertingSaveStatePeriodic,privatePreview,@grafana/alerting-squad,false,false,fal alertingSaveStateCompressed,preview,@grafana/alerting-squad,false,false,false scopeApi,experimental,@grafana/grafana-app-platform-squad,false,false,false useScopeSingleNodeEndpoint,experimental,@grafana/grafana-operator-experience-squad,false,false,true +useMultipleScopeNodesEndpoint,experimental,@grafana/grafana-operator-experience-squad,false,false,true promQLScope,GA,@grafana/oss-big-tent,false,false,false logQLScope,privatePreview,@grafana/observability-logs,false,false,false sqlExpressions,preview,@grafana/grafana-datasources-core-services,false,false,false @@ -197,6 +198,7 @@ fetchRulesUsingPost,experimental,@grafana/alerting-squad,false,false,false newLogsPanel,experimental,@grafana/observability-logs,false,false,true grafanaconThemes,GA,@grafana/grafana-frontend-platform,false,true,false alertingJiraIntegration,experimental,@grafana/alerting-squad,false,false,true +alertingUseNewSimplifiedRoutingHashAlgorithm,preview,@grafana/alerting-squad,false,true,false useScopesNavigationEndpoint,experimental,@grafana/grafana-frontend-platform,false,false,true 
scopeSearchAllLevels,experimental,@grafana/grafana-frontend-platform,false,false,false alertingRuleVersionHistoryRestore,GA,@grafana/alerting-squad,false,false,true @@ -215,7 +217,6 @@ unifiedStorageGrpcConnectionPool,experimental,@grafana/search-and-storage,false, alertingRulePermanentlyDelete,GA,@grafana/alerting-squad,false,false,true alertingRuleRecoverDeleted,GA,@grafana/alerting-squad,false,false,true multiTenantTempCredentials,experimental,@grafana/aws-datasources,false,false,false -localizationForPlugins,experimental,@grafana/plugins-platform-backend,false,false,false unifiedNavbars,GA,@grafana/plugins-platform-backend,false,false,true logsPanelControls,preview,@grafana/observability-logs,false,false,true metricsFromProfiles,experimental,@grafana/observability-traces-and-profiling,false,false,true @@ -265,3 +266,4 @@ pluginContainers,privatePreview,@grafana/plugins-platform-backend,false,true,fal tempoSearchBackendMigration,GA,@grafana/oss-big-tent,false,true,false filterOutBotsFromFrontendLogs,experimental,@grafana/plugins-platform-backend,false,false,true cdnPluginsLoadFirst,experimental,@grafana/plugins-platform-backend,false,false,false +cdnPluginsUrls,experimental,@grafana/plugins-platform-backend,false,false,false diff --git a/pkg/services/featuremgmt/toggles_gen.go b/pkg/services/featuremgmt/toggles_gen.go index b189a80ead6..b22fc6ec5eb 100644 --- a/pkg/services/featuremgmt/toggles_gen.go +++ b/pkg/services/featuremgmt/toggles_gen.go @@ -95,6 +95,10 @@ const ( // populate star status from apiserver FlagStarsFromAPIServer = "starsFromAPIServer" + // FlagKubernetesStars + // Routes stars requests from /api to the /apis endpoint + FlagKubernetesStars = "kubernetesStars" + // FlagInfluxqlStreamingParser // Enable streaming JSON parser for InfluxDB datasource InfluxQL query language FlagInfluxqlStreamingParser = "influxqlStreamingParser" @@ -179,10 +183,6 @@ const ( // Start an additional https handler and write kubectl options 
FlagGrafanaAPIServerEnsureKubectlAccess = "grafanaAPIServerEnsureKubectlAccess" - // FlagFeatureToggleAdminPage - // Enable admin page for managing feature toggles from the Grafana front-end. Grafana Cloud only. - FlagFeatureToggleAdminPage = "featureToggleAdminPage" - // FlagAwsAsyncQueryCaching // Enable caching for async queries for Redshift and Athena. Requires that the datasource has caching and async query support enabled FlagAwsAsyncQueryCaching = "awsAsyncQueryCaching" @@ -424,7 +424,7 @@ const ( FlagAlertingSaveStatePeriodic = "alertingSaveStatePeriodic" // FlagAlertingSaveStateCompressed - // Enables the compressed protobuf-based alert state storage + // Enables the compressed protobuf-based alert state storage. Default is enabled. FlagAlertingSaveStateCompressed = "alertingSaveStateCompressed" // FlagScopeApi @@ -435,6 +435,10 @@ const ( // Use the single node endpoint for the scope api. This is used to fetch the scope parent node. FlagUseScopeSingleNodeEndpoint = "useScopeSingleNodeEndpoint" + // FlagUseMultipleScopeNodesEndpoint + // Makes the frontend use the 'names' param for fetching multiple scope nodes at once + FlagUseMultipleScopeNodesEndpoint = "useMultipleScopeNodesEndpoint" + // FlagPromQLScope // In-development feature that will allow injection of labels into prometheus queries. FlagPromQLScope = "promQLScope" @@ -799,6 +803,9 @@ const ( // Enables the new Jira integration for contact points in cloud alert managers. 
FlagAlertingJiraIntegration = "alertingJiraIntegration" + // FlagAlertingUseNewSimplifiedRoutingHashAlgorithm + FlagAlertingUseNewSimplifiedRoutingHashAlgorithm = "alertingUseNewSimplifiedRoutingHashAlgorithm" + // FlagUseScopesNavigationEndpoint // Use the scopes navigation endpoint instead of the dashboardbindings endpoint FlagUseScopesNavigationEndpoint = "useScopesNavigationEndpoint" @@ -871,10 +878,6 @@ const ( // use multi-tenant path for awsTempCredentials FlagMultiTenantTempCredentials = "multiTenantTempCredentials" - // FlagLocalizationForPlugins - // Enables localization for plugins - FlagLocalizationForPlugins = "localizationForPlugins" - // FlagUnifiedNavbars // Enables unified navbars FlagUnifiedNavbars = "unifiedNavbars" @@ -1070,4 +1073,8 @@ const ( // FlagCdnPluginsLoadFirst // Prioritize loading plugins from the CDN before other sources FlagCdnPluginsLoadFirst = "cdnPluginsLoadFirst" + + // FlagCdnPluginsUrls + // Enable loading plugins via declarative URLs + FlagCdnPluginsUrls = "cdnPluginsUrls" ) diff --git a/pkg/services/featuremgmt/toggles_gen.json b/pkg/services/featuremgmt/toggles_gen.json index ec62239bd1e..ac862d2312c 100644 --- a/pkg/services/featuremgmt/toggles_gen.json +++ b/pkg/services/featuremgmt/toggles_gen.json @@ -555,14 +555,14 @@ { "metadata": { "name": "alertingSaveStateCompressed", - "resourceVersion": "1754657532777", + "resourceVersion": "1759485036332", "creationTimestamp": "2025-01-27T17:47:33Z", "annotations": { - "grafana.app/updatedTimestamp": "2025-08-08 12:52:12.777935 +0000 UTC" + "grafana.app/updatedTimestamp": "2025-10-03 09:50:36.332762 +0000 UTC" } }, "spec": { - "description": "Enables the compressed protobuf-based alert state storage", + "description": "Enables the compressed protobuf-based alert state storage. 
Default is enabled.", "stage": "preview", "codeowner": "@grafana/alerting-squad", "expression": "true" @@ -610,6 +610,46 @@ "expression": "true" } }, + { + "metadata": { + "name": "alertingUseNewSimplifiedRoutingHashAlgorithm", + "resourceVersion": "1759339813575", + "creationTimestamp": "2025-10-01T17:28:42Z", + "deletionTimestamp": "2025-10-01T17:29:29Z", + "annotations": { + "grafana.app/updatedTimestamp": "2025-10-01 17:30:13.575464 +0000 UTC" + } + }, + "spec": { + "description": "", + "stage": "preview", + "codeowner": "@grafana/alerting-squad", + "requiresRestart": true, + "hideFromAdminPage": true, + "hideFromDocs": true, + "expression": "true" + } + }, + { + "metadata": { + "name": "alertingUseOldSimplifiedRoutingHashAlgorithm", + "resourceVersion": "1759339782639", + "creationTimestamp": "2025-10-01T17:29:29Z", + "deletionTimestamp": "2025-10-01T17:30:13Z", + "annotations": { + "grafana.app/updatedTimestamp": "2025-10-01 17:29:42.63941 +0000 UTC" + } + }, + "spec": { + "description": "", + "stage": "deprecated", + "codeowner": "@grafana/alerting-squad", + "requiresRestart": true, + "hideFromAdminPage": true, + "hideFromDocs": true, + "expression": "false" + } + }, { "metadata": { "name": "alertmanagerRemotePrimary", @@ -863,6 +903,19 @@ "expression": "false" } }, + { + "metadata": { + "name": "cdnPluginsUrls", + "resourceVersion": "1759489886228", + "creationTimestamp": "2025-10-03T11:11:26Z" + }, + "spec": { + "description": "Enable loading plugins via declarative URLs", + "stage": "experimental", + "codeowner": "@grafana/plugins-platform-backend", + "expression": "false" + } + }, { "metadata": { "name": "cloudRBACRoles", @@ -1577,6 +1630,7 @@ "name": "featureToggleAdminPage", "resourceVersion": "1758022099771", "creationTimestamp": "2023-07-18T20:43:32Z", + "deletionTimestamp": "2025-09-29T13:36:16Z", "annotations": { "grafana.app/updatedTimestamp": "2025-09-16 11:28:19.771156 +0000 UTC" } @@ -2187,6 +2241,19 @@ "requiresRestart": true } }, + { + 
"metadata": { + "name": "kubernetesStars", + "resourceVersion": "1759149842036", + "creationTimestamp": "2025-09-29T12:44:02Z" + }, + "spec": { + "description": "Routes stars requests from /api to the /apis endpoint", + "stage": "experimental", + "codeowner": "@grafana/grafana-app-platform-squad", + "requiresRestart": true + } + }, { "metadata": { "name": "localeFormatPreference", @@ -2203,7 +2270,8 @@ "metadata": { "name": "localizationForPlugins", "resourceVersion": "1753448760331", - "creationTimestamp": "2025-03-31T04:38:38Z" + "creationTimestamp": "2025-03-31T04:38:38Z", + "deletionTimestamp": "2025-09-29T07:10:59Z" }, "spec": { "description": "Enables localization for plugins", @@ -3831,6 +3899,22 @@ "frontend": true } }, + { + "metadata": { + "name": "useMultipleScopeNodesEndpoint", + "resourceVersion": "1759237515008", + "creationTimestamp": "2025-09-30T13:05:15Z" + }, + "spec": { + "description": "Makes the frontend use the 'names' param for fetching multiple scope nodes at once", + "stage": "experimental", + "codeowner": "@grafana/grafana-operator-experience-squad", + "frontend": true, + "hideFromAdminPage": true, + "hideFromDocs": true, + "expression": "false" + } + }, { "metadata": { "name": "useScopeSingleNodeEndpoint", diff --git a/pkg/services/featuremgmt/toggles_gen_test.go b/pkg/services/featuremgmt/toggles_gen_test.go index d3c7f88f746..d6a7c66802d 100644 --- a/pkg/services/featuremgmt/toggles_gen_test.go +++ b/pkg/services/featuremgmt/toggles_gen_test.go @@ -21,7 +21,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/grafana/grafana/pkg/apimachinery/utils" - featuretoggleapi "github.com/grafana/grafana/pkg/apis/featuretoggle/v0alpha1" + featuretoggleapi "github.com/grafana/grafana/pkg/services/featuremgmt/feature_toggle_api" "github.com/grafana/grafana/pkg/services/featuremgmt/strcase" ) diff --git a/pkg/services/folder/folderimpl/folder.go b/pkg/services/folder/folderimpl/folder.go index f56e682283b..bad681c60a8 100644 --- 
a/pkg/services/folder/folderimpl/folder.go +++ b/pkg/services/folder/folderimpl/folder.go @@ -1401,13 +1401,13 @@ func (s *Service) validateParent(ctx context.Context, orgID int64, parentUID str // Create folder under itself is not allowed if parentUID == UID { - return folder.ErrCircularReference + return folder.ErrCircularReference.Errorf("circular reference detected") } // check there is no circular reference for _, ancestor := range ancestors { if ancestor.UID == UID { - return folder.ErrCircularReference + return folder.ErrCircularReference.Errorf("circular reference detected") } } diff --git a/pkg/services/folder/folderimpl/sqlstore.go b/pkg/services/folder/folderimpl/sqlstore.go index 65cd5623ec4..0644cae2d3d 100644 --- a/pkg/services/folder/folderimpl/sqlstore.go +++ b/pkg/services/folder/folderimpl/sqlstore.go @@ -417,7 +417,7 @@ func (ss *FolderStoreImpl) GetHeight(ctx context.Context, foldrUID string, orgID ele := queue[0] queue = queue[1:] if parentUID != nil && *parentUID == ele { - return 0, folder.ErrCircularReference + return 0, folder.ErrCircularReference.Errorf("circular reference detected") } folders, err := ss.GetChildren(ctx, folder.GetChildrenQuery{UID: ele, OrgID: orgID}) if err != nil { diff --git a/pkg/services/folder/folderimpl/sqlstore_test.go b/pkg/services/folder/folderimpl/sqlstore_test.go index 08f4df69f5f..007767f2a6a 100644 --- a/pkg/services/folder/folderimpl/sqlstore_test.go +++ b/pkg/services/folder/folderimpl/sqlstore_test.go @@ -840,7 +840,7 @@ func TestIntegrationGetHeight(t *testing.T) { t.Run("should failed when the parent folder exist in the subtree", func(t *testing.T) { _, err = folderStore.GetHeight(context.Background(), parent.UID, orgID, &subTree[0]) - require.Error(t, err, folder.ErrCircularReference) + require.Error(t, err, folder.ErrCircularReference.Errorf("circular reference detected")) }) } diff --git a/pkg/services/folder/folderimpl/unifiedstore.go b/pkg/services/folder/folderimpl/unifiedstore.go index 
0487af8aa40..28ced5f2321 100644 --- a/pkg/services/folder/folderimpl/unifiedstore.go +++ b/pkg/services/folder/folderimpl/unifiedstore.go @@ -298,7 +298,7 @@ func (ss *FolderUnifiedStoreImpl) GetHeight(ctx context.Context, foldrUID string ele := queue[0] queue = queue[1:] if parentUID != nil && *parentUID == ele { - return 0, folder.ErrCircularReference + return 0, folder.ErrCircularReference.Errorf("circular reference detected") } folders, err := ss.GetChildren(ctx, folder.GetChildrenQuery{UID: ele, OrgID: orgID}) if err != nil { diff --git a/pkg/services/navtree/models.go b/pkg/services/navtree/models.go index 51bbdf261b1..79b633515c1 100644 --- a/pkg/services/navtree/models.go +++ b/pkg/services/navtree/models.go @@ -48,7 +48,6 @@ const ( NavIDAlerting = "alerting" NavIDObservability = "observability" NavIDInfrastructure = "infrastructure" - NavIDFrontend = "frontend" NavIDReporting = "reports" NavIDApps = "apps" NavIDCfgGeneral = "cfg/general" diff --git a/pkg/services/navtree/navtreeimpl/admin.go b/pkg/services/navtree/navtreeimpl/admin.go index ff256522fba..8794a57c3e9 100644 --- a/pkg/services/navtree/navtreeimpl/admin.go +++ b/pkg/services/navtree/navtreeimpl/admin.go @@ -44,15 +44,6 @@ func (s *ServiceImpl) getAdminNode(c *contextmodel.ReqContext) (*navtree.NavLink Text: "Organizations", SubTitle: "Isolated instances of Grafana running on the same server", Id: "global-orgs", Url: s.cfg.AppSubURL + "/admin/orgs", Icon: "building", }) } - if s.features.IsEnabled(ctx, featuremgmt.FlagFeatureToggleAdminPage) && hasAccess(ac.EvalPermission(ac.ActionFeatureManagementRead)) { - generalNodeLinks = append(generalNodeLinks, &navtree.NavLink{ - Text: "Feature toggles", - SubTitle: "View and edit feature toggles", - Id: "feature-toggles", - Url: s.cfg.AppSubURL + "/admin/featuretoggles", - Icon: "toggle-on", - }) - } if hasAccess(cloudmigration.MigrationAssistantAccess) && s.features.IsEnabled(ctx, featuremgmt.FlagOnPremToCloudMigrations) { generalNodeLinks = 
append(generalNodeLinks, &navtree.NavLink{ Text: "Migrate to Grafana Cloud", diff --git a/pkg/services/navtree/navtreeimpl/applinks.go b/pkg/services/navtree/navtreeimpl/applinks.go index e009b7bc196..0d9229a533c 100644 --- a/pkg/services/navtree/navtreeimpl/applinks.go +++ b/pkg/services/navtree/navtreeimpl/applinks.go @@ -260,10 +260,21 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n } } + sectionChildren := []*navtree.NavLink{appLink} + // asserts pages expand to root Observability section instead of it's own node + if plugin.ID == "grafana-asserts-app" { + sectionChildren = appLink.Children + + // keep current sorting if the pages, but above all the other apps + for _, child := range sectionChildren { + child.SortWeight = -100 + child.SortWeight + } + } + if sectionID == navtree.NavIDRoot { treeRoot.AddSection(appLink) } else if navNode := treeRoot.FindById(sectionID); navNode != nil { - navNode.Children = append(navNode.Children, appLink) + navNode.Children = append(navNode.Children, sectionChildren...) 
} else { switch sectionID { case navtree.NavIDApps: @@ -272,18 +283,19 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n Icon: "layer-group", SubTitle: "App plugins that extend the Grafana experience", Id: navtree.NavIDApps, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, SortWeight: navtree.WeightApps, Url: s.cfg.AppSubURL + "/apps", }) case navtree.NavIDObservability: + treeRoot.AddSection(&navtree.NavLink{ Text: "Observability", Id: navtree.NavIDObservability, SubTitle: "Monitor infrastructure and applications in real time with Grafana Cloud's fully managed observability suite", Icon: "heart-rate", SortWeight: navtree.WeightObservability, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, Url: s.cfg.AppSubURL + "/observability", }) case navtree.NavIDInfrastructure: @@ -293,19 +305,9 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n SubTitle: "Understand your infrastructure's health", Icon: "heart-rate", SortWeight: navtree.WeightInfrastructure, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, Url: s.cfg.AppSubURL + "/infrastructure", }) - case navtree.NavIDFrontend: - treeRoot.AddSection(&navtree.NavLink{ - Text: "Frontend", - Id: navtree.NavIDFrontend, - SubTitle: "Gain real user monitoring insights", - Icon: "frontend-observability", - SortWeight: navtree.WeightFrontend, - Children: []*navtree.NavLink{appLink}, - Url: s.cfg.AppSubURL + "/frontend", - }) case navtree.NavIDAlertsAndIncidents: alertsAndIncidentsChildren := []*navtree.NavLink{} for _, alertingNode := range alertingNodes { @@ -332,7 +334,7 @@ func (s *ServiceImpl) addPluginToSection(c *contextmodel.ReqContext, treeRoot *n SubTitle: "Optimize performance with k6 and Synthetic Monitoring insights", Icon: "k6", SortWeight: navtree.WeightTestingAndSynthetics, - Children: []*navtree.NavLink{appLink}, + Children: sectionChildren, Url: s.cfg.AppSubURL + 
"/testing-and-synthetics", }) case navtree.NavIDAdaptiveTelemetry: @@ -372,11 +374,11 @@ func (s *ServiceImpl) hasAccessToInclude(c *contextmodel.ReqContext, pluginID st func (s *ServiceImpl) readNavigationSettings() { s.navigationAppConfig = map[string]NavigationAppConfig{ "grafana-asserts-app": {SectionID: navtree.NavIDObservability, SortWeight: 1, Icon: "asserts"}, - "grafana-app-observability-app": {SectionID: navtree.NavIDObservability, SortWeight: 2, Text: "Application"}, - "grafana-csp-app": {SectionID: navtree.NavIDObservability, SortWeight: 3, Icon: "cloud-provider"}, - "grafana-k8s-app": {SectionID: navtree.NavIDObservability, SortWeight: 4, Text: "Kubernetes"}, - "grafana-dbo11y-app": {SectionID: navtree.NavIDObservability, SortWeight: 5, Text: "Databases"}, - "grafana-kowalski-app": {SectionID: navtree.NavIDObservability, SortWeight: 6, Text: "Frontend"}, + "grafana-kowalski-app": {SectionID: navtree.NavIDObservability, SortWeight: 2, Text: "Frontend"}, + "grafana-app-observability-app": {SectionID: navtree.NavIDObservability, SortWeight: 3, Text: "Application"}, + "grafana-dbo11y-app": {SectionID: navtree.NavIDObservability, SortWeight: 4, Text: "Database", IsNew: true}, + "grafana-k8s-app": {SectionID: navtree.NavIDObservability, SortWeight: 5, Text: "Kubernetes"}, + "grafana-csp-app": {SectionID: navtree.NavIDObservability, SortWeight: 6, Icon: "cloud-provider"}, "grafana-metricsdrilldown-app": {SectionID: navtree.NavIDDrilldown, SortWeight: 1, Text: "Metrics"}, "grafana-lokiexplore-app": {SectionID: navtree.NavIDDrilldown, SortWeight: 2, Text: "Logs"}, "grafana-exploretraces-app": {SectionID: navtree.NavIDDrilldown, SortWeight: 3, Text: "Traces"}, @@ -390,10 +392,10 @@ func (s *ServiceImpl) readNavigationSettings() { "grafana-slo-app": {SectionID: navtree.NavIDAlertsAndIncidents, SortWeight: 7}, "grafana-cloud-link-app": {SectionID: navtree.NavIDCfgPlugins, SortWeight: 3}, "grafana-costmanagementui-app": {SectionID: navtree.NavIDCfg, Text: "Cost 
management"}, - "grafana-adaptive-metrics-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 1, Text: "Adaptive Metrics", SubTitle: "Analyzes and reduces unused metrics and cardinality to help you focus on your most valuable performance data."}, - "grafana-adaptivelogs-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 2, Text: "Adaptive Logs", SubTitle: "Analyzes log patterns to drop repetitive lines and accelerate troubleshooting."}, - "grafana-adaptivetraces-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 3, Text: "Adaptive Traces", SubTitle: "Analyzes and retains the most valuable traces, providing the performance insights needed to resolve issues faster."}, - "grafana-adaptiveprofiles-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 4, Text: "Adaptive Profiles", SubTitle: "Analyzes application profiles to pinpoint the root cause of performance issues and accelerate resolution."}, + "grafana-adaptive-metrics-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 1}, + "grafana-adaptivelogs-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 2}, + "grafana-adaptivetraces-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 3}, + "grafana-adaptiveprofiles-app": {SectionID: navtree.NavIDAdaptiveTelemetry, SortWeight: 4}, "grafana-attributions-app": {SectionID: navtree.NavIDCfg, Text: "Attributions"}, "grafana-logvolumeexplorer-app": {SectionID: navtree.NavIDCfg, Text: "Log Volume Explorer"}, "grafana-easystart-app": {SectionID: navtree.NavIDRoot, SortWeight: navtree.WeightApps + 1, Text: "Connections", Icon: "adjust-circle"}, diff --git a/pkg/services/navtree/navtreeimpl/applinks_test.go b/pkg/services/navtree/navtreeimpl/applinks_test.go index 87b0276f370..53c06601900 100644 --- a/pkg/services/navtree/navtreeimpl/applinks_test.go +++ b/pkg/services/navtree/navtreeimpl/applinks_test.go @@ -387,7 +387,7 @@ func TestReadingNavigationSettings(t *testing.T) { require.Equal(t, "dashboards", 
service.navigationAppConfig["grafana-k8s-app"].SectionID) require.Equal(t, "admin", service.navigationAppConfig["other-app"].SectionID) - require.Equal(t, int64(4), service.navigationAppConfig["grafana-k8s-app"].SortWeight) + require.Equal(t, int64(5), service.navigationAppConfig["grafana-k8s-app"].SortWeight) require.Equal(t, int64(12), service.navigationAppConfig["other-app"].SortWeight) require.Equal(t, "admin", service.navigationAppPathConfig["/a/grafana-k8s-app/foo"].SectionID) diff --git a/pkg/services/ngalert/api/api_alertmanager_test.go b/pkg/services/ngalert/api/api_alertmanager_test.go index bbe1df17155..433c4d173f6 100644 --- a/pkg/services/ngalert/api/api_alertmanager_test.go +++ b/pkg/services/ngalert/api/api_alertmanager_test.go @@ -291,7 +291,8 @@ func TestAlertmanagerAutogenConfig(t *testing.T) { 1: {AlertmanagerConfiguration: validConfig, OrgID: 1}, 2: {AlertmanagerConfiguration: validConfigWithoutAutogen, OrgID: 2}, } - sut.mam = createMultiOrgAlertmanager(t, configs) + ft := featuremgmt.WithFeatures(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm) + sut.mam = createMultiOrgAlertmanager(t, configs, withAMFeatureToggles(ft)) return sut, configs } @@ -577,9 +578,29 @@ func createSut(t *testing.T) AlertmanagerSrv { } } -func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration) *notifier.MultiOrgAlertmanager { +type createMultiOrgAMOptions struct { + featureToggles featuremgmt.FeatureToggles +} + +type createMultiOrgAMOptionsFunc func(*createMultiOrgAMOptions) + +func withAMFeatureToggles(toggles featuremgmt.FeatureToggles) createMultiOrgAMOptionsFunc { + return func(opts *createMultiOrgAMOptions) { + opts.featureToggles = toggles + } +} + +func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration, opts ...createMultiOrgAMOptionsFunc) *notifier.MultiOrgAlertmanager { t.Helper() + options := createMultiOrgAMOptions{ + featureToggles: featuremgmt.WithFeatures(), + } + + 
for _, opt := range opts { + opt(&options) + } + configStore := notifier.NewFakeConfigStore(t, configs) orgStore := notifier.NewFakeOrgStore(t, []int64{1, 2, 3}) provStore := ngfakes.NewFakeProvisioningStore() @@ -610,7 +631,7 @@ func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertC ngfakes.NewFakeReceiverPermissionsService(), log.New("testlogger"), secretsService, - featuremgmt.WithManager(), + options.featureToggles, nil, ) require.NoError(t, err) diff --git a/pkg/services/ngalert/api/api_provisioning.go b/pkg/services/ngalert/api/api_provisioning.go index 29f3ca6679b..dc8863e1fb6 100644 --- a/pkg/services/ngalert/api/api_provisioning.go +++ b/pkg/services/ngalert/api/api_provisioning.go @@ -525,7 +525,7 @@ func determineProvenance(ctx *contextmodel.ReqContext) definitions.Provenance { } func extractExportRequest(c *contextmodel.ReqContext) definitions.ExportQueryParams { - var format = "yaml" + format := "yaml" acceptHeader := c.Req.Header.Get("Accept") if strings.Contains(acceptHeader, "yaml") { @@ -673,11 +673,22 @@ func escapeRuleGroup(group definitions.AlertRuleGroupExport) definitions.AlertRu func escapeRuleNotificationSettings(ns definitions.AlertRuleNotificationSettingsExport) definitions.AlertRuleNotificationSettingsExport { ns.Receiver = addEscapeCharactersToString(ns.Receiver) - for j := range ns.GroupBy { - ns.GroupBy[j] = addEscapeCharactersToString(ns.GroupBy[j]) + if ns.GroupBy != nil { + for j := range *ns.GroupBy { + (*ns.GroupBy)[j] = addEscapeCharactersToString((*ns.GroupBy)[j]) + } } - for k := range ns.MuteTimeIntervals { - ns.MuteTimeIntervals[k] = addEscapeCharactersToString(ns.MuteTimeIntervals[k]) + + if ns.MuteTimeIntervals != nil { + for k := range *ns.MuteTimeIntervals { + (*ns.MuteTimeIntervals)[k] = addEscapeCharactersToString((*ns.MuteTimeIntervals)[k]) + } + } + + if ns.ActiveTimeIntervals != nil { + for k := range *ns.ActiveTimeIntervals { + (*ns.ActiveTimeIntervals)[k] = 
addEscapeCharactersToString((*ns.ActiveTimeIntervals)[k]) + } } return ns } diff --git a/pkg/services/ngalert/api/api_provisioning_test.go b/pkg/services/ngalert/api/api_provisioning_test.go index a7dd8f1f2fe..db380834c4a 100644 --- a/pkg/services/ngalert/api/api_provisioning_test.go +++ b/pkg/services/ngalert/api/api_provisioning_test.go @@ -14,8 +14,7 @@ import ( "testing" "time" - alertingNotify "github.com/grafana/alerting/notify" - "github.com/grafana/alerting/receivers/schema" + "github.com/grafana/alerting/notify/notifytest" prometheus "github.com/prometheus/alertmanager/config" "github.com/prometheus/alertmanager/pkg/labels" "github.com/prometheus/alertmanager/timeinterval" @@ -2033,12 +2032,12 @@ func TestApiContactPointExportSnapshot(t *testing.T) { t.Run(fmt.Sprintf("exportType=%s", exportType), func(t *testing.T) { for _, redacted := range []bool{true, false} { t.Run(fmt.Sprintf("redacted=%t", redacted), func(t *testing.T) { - allIntegrations := make([]models.Integration, 0, len(alertingNotify.AllKnownConfigsForTesting)) - for integrationType := range alertingNotify.AllKnownConfigsForTesting { + allIntegrations := make([]models.Integration, 0, len(notifytest.AllKnownV1ConfigsForTesting)) + for integrationType := range notifytest.AllKnownV1ConfigsForTesting { integration := models.IntegrationGen( models.IntegrationMuts.WithName(allIntegrationsName), - models.IntegrationMuts.WithUID(fmt.Sprintf("%s-uid", integrationType)), - models.IntegrationMuts.WithValidConfig(schema.IntegrationType(integrationType)), + models.IntegrationMuts.WithUID(fmt.Sprintf("%s-uid", strings.ToLower(string(integrationType)))), + models.IntegrationMuts.WithValidConfig(integrationType), )() integration.DisableResolveMessage = redacted allIntegrations = append(allIntegrations, integration) diff --git a/pkg/services/ngalert/api/api_ruler_export_test.go b/pkg/services/ngalert/api/api_ruler_export_test.go index d9b69d9e1d3..dcdeaf0c436 100644 --- 
a/pkg/services/ngalert/api/api_ruler_export_test.go +++ b/pkg/services/ngalert/api/api_ruler_export_test.go @@ -201,6 +201,33 @@ func TestExportFromPayload(t *testing.T) { require.Equal(t, `attachment;filename=export.tf`, rc.Resp.Header().Get("Content-Disposition")) }) }) + + t.Run("hcl body with simplified routing is as expected", func(t *testing.T) { + requestFile := "post-rulegroup-simplified-routing.json" + + rawBody, err := testData.ReadFile(path.Join("test-data", requestFile)) + require.NoError(t, err) + + var buf bytes.Buffer + require.NoError(t, json.Compact(&buf, rawBody)) + + var body apimodels.PostableRuleGroupConfig + require.NoError(t, json.Unmarshal(buf.Bytes(), &body)) + + expectedResponse, err := testData.ReadFile(path.Join("test-data", strings.Replace(requestFile, ".json", "-export.hcl", 1))) + require.NoError(t, err) + + rc := createRequest() + rc.Req.Form.Set("format", "hcl") + rc.Req.Form.Set("download", "false") + + response := srv.ExportFromPayload(rc, body, folder.UID) + response.WriteTo(rc) + + require.Equal(t, 200, response.Status()) + require.Equal(t, string(expectedResponse), string(response.Body())) + require.Equal(t, "text/hcl", rc.Resp.Header().Get("Content-Type")) + }) } func TestExportRules(t *testing.T) { diff --git a/pkg/services/ngalert/api/api_testing.go b/pkg/services/ngalert/api/api_testing.go index e26f2a89f9a..ceda51a7a22 100644 --- a/pkg/services/ngalert/api/api_testing.go +++ b/pkg/services/ngalert/api/api_testing.go @@ -113,7 +113,7 @@ func (srv TestingApiSrv) RouteTestGrafanaRuleConfig(c *contextmodel.ReqContext, now, rule, results, - state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder), + state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder, srv.featureManager), nil, ) diff --git a/pkg/services/ngalert/api/compat/compat.go b/pkg/services/ngalert/api/compat/compat.go index 9d1796f78f5..eadf40217af 100644 --- a/pkg/services/ngalert/api/compat/compat.go +++ 
b/pkg/services/ngalert/api/compat/compat.go @@ -483,12 +483,12 @@ func AlertRuleNotificationSettingsExportFromNotificationSettings(ns []models.Not return &definitions.AlertRuleNotificationSettingsExport{ Receiver: m.Receiver, - GroupBy: m.GroupBy, + GroupBy: NilIfEmpty(util.Pointer(m.GroupBy)), GroupWait: toStringIfNotNil(m.GroupWait), GroupInterval: toStringIfNotNil(m.GroupInterval), RepeatInterval: toStringIfNotNil(m.RepeatInterval), - MuteTimeIntervals: m.MuteTimeIntervals, - ActiveTimeIntervals: m.ActiveTimeIntervals, + MuteTimeIntervals: NilIfEmpty(util.Pointer(m.MuteTimeIntervals)), + ActiveTimeIntervals: NilIfEmpty(util.Pointer(m.ActiveTimeIntervals)), } } diff --git a/pkg/services/ngalert/api/compat_contact_points.go b/pkg/services/ngalert/api/compat_contact_points.go index e18b5c0a74a..5a55bc9ca95 100644 --- a/pkg/services/ngalert/api/compat_contact_points.go +++ b/pkg/services/ngalert/api/compat_contact_points.go @@ -7,6 +7,7 @@ import ( "strings" "unsafe" + alertingModels "github.com/grafana/alerting/models" "github.com/grafana/alerting/notify" "github.com/grafana/alerting/receivers" jsoniter "github.com/json-iterator/go" @@ -53,7 +54,7 @@ func ContactPointToContactPointExport(cp definitions.ContactPoint) (notify.APIRe len(cp.Threema) + len(cp.Victorops) + len(cp.Webhook) + len(cp.Wecom) + len(cp.Webex) + len(cp.Mqtt) - integration := make([]*notify.GrafanaIntegrationConfig, 0, contactPointsLength) + integration := make([]*alertingModels.IntegrationConfig, 0, contactPointsLength) var errs []error for _, i := range cp.Alertmanager { @@ -222,20 +223,20 @@ func ContactPointToContactPointExport(cp definitions.ContactPoint) (notify.APIRe return notify.APIReceiver{}, errors.Join(errs...) 
} contactPoint := notify.APIReceiver{ - ConfigReceiver: notify.ConfigReceiver{Name: cp.Name}, - GrafanaIntegrations: notify.GrafanaIntegrations{Integrations: integration}, + ConfigReceiver: notify.ConfigReceiver{Name: cp.Name}, + ReceiverConfig: alertingModels.ReceiverConfig{Integrations: integration}, } return contactPoint, nil } // marshallIntegration converts the API model integration to the storage model that contains settings in the JSON format. // The secret fields are not encrypted. -func marshallIntegration(json jsoniter.API, integrationType string, integration interface{}, disableResolveMessage *bool) (*notify.GrafanaIntegrationConfig, error) { +func marshallIntegration(json jsoniter.API, integrationType string, integration interface{}, disableResolveMessage *bool) (*alertingModels.IntegrationConfig, error) { data, err := json.Marshal(integration) if err != nil { return nil, fmt.Errorf("failed to marshall integration '%s' to JSON: %w", integrationType, err) } - e := ¬ify.GrafanaIntegrationConfig{ + e := &alertingModels.IntegrationConfig{ Type: integrationType, Settings: data, } diff --git a/pkg/services/ngalert/api/compat_contact_points_test.go b/pkg/services/ngalert/api/compat_contact_points_test.go index bda8336b0bf..ef178dd085f 100644 --- a/pkg/services/ngalert/api/compat_contact_points_test.go +++ b/pkg/services/ngalert/api/compat_contact_points_test.go @@ -7,8 +7,12 @@ import ( "testing" "github.com/google/go-cmp/cmp" + alertingmodels "github.com/grafana/alerting/models" "github.com/grafana/alerting/notify" + "github.com/grafana/alerting/notify/notifytest" + "github.com/grafana/alerting/receivers/line" receiversTesting "github.com/grafana/alerting/receivers/testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" apicompat "github.com/grafana/grafana/pkg/services/ngalert/api/compat" @@ -53,12 +57,12 @@ func TestContactPointFromContactPointExports(t *testing.T) { } // use the configs for testing because they have all fields 
supported by integrations - for integrationType, cfg := range notify.AllKnownConfigsForTesting { - t.Run(integrationType, func(t *testing.T) { + for integrationType, cfg := range notifytest.AllKnownV1ConfigsForTesting { + t.Run(string(integrationType), func(t *testing.T) { recCfg := ¬ify.APIReceiver{ ConfigReceiver: notify.ConfigReceiver{Name: "test-receiver"}, - GrafanaIntegrations: notify.GrafanaIntegrations{ - Integrations: []*notify.GrafanaIntegrationConfig{ + ReceiverConfig: alertingmodels.ReceiverConfig{ + Integrations: []*alertingmodels.IntegrationConfig{ cfg.GetRawNotifierConfig("test"), }, }, @@ -87,9 +91,15 @@ func TestContactPointFromContactPointExports(t *testing.T) { } if integrationType != "webhook" { // Many notifiers now support HTTPClientConfig but only Webhook currently has it enabled in schema. - //TODO: Remove this once HTTPClientConfig is added to other schemas. + // TODO: Remove this once HTTPClientConfig is added to other schemas. pathFilters = append(pathFilters, "HTTPClientConfig") } + if integrationType == line.Type { + for _, l := range actual.LineConfigs { + assert.Equal(t, "line", l.Type) + l.Type = string(line.Type) + } + } pathFilter := cmp.FilterPath(func(path cmp.Path) bool { for _, filter := range pathFilters { if strings.Contains(path.String(), filter) { diff --git a/pkg/services/ngalert/api/test-data/post-rulegroup-simplified-routing-export.hcl b/pkg/services/ngalert/api/test-data/post-rulegroup-simplified-routing-export.hcl new file mode 100644 index 00000000000..ff3c07ee97e --- /dev/null +++ b/pkg/services/ngalert/api/test-data/post-rulegroup-simplified-routing-export.hcl @@ -0,0 +1,45 @@ +resource "grafana_rule_group" "rule_group_2b12784d0e1454cd" { + org_id = 1 + name = "group_simplified_routing" + folder_uid = "e4584834-1a87-4dff-8913-8a4748dfca79" + interval_seconds = 10 + + rule { + name = "test" + condition = "C" + + data { + ref_id = "A" + + relative_time_range { + from = 600 + to = 0 + } + + datasource_uid = 
"grafanacloud-prom" + model = "{\"editorMode\":\"code\",\"expr\":\"vector(1)\",\"instant\":true,\"intervalMs\":1000,\"legendFormat\":\"__auto\",\"maxDataPoints\":43200,\"range\":false,\"refId\":\"A\"}" + } + data { + ref_id = "C" + + relative_time_range { + from = 0 + to = 0 + } + + datasource_uid = "__expr__" + model = "{\"conditions\":[{\"evaluator\":{\"params\":[1],\"type\":\"gt\"},\"operator\":{\"type\":\"and\"},\"query\":{\"params\":[\"C\"]},\"reducer\":{\"params\":[],\"type\":\"last\"},\"type\":\"query\"}],\"datasource\":{\"type\":\"__expr__\",\"uid\":\"__expr__\"},\"expression\":\"A\",\"intervalMs\":1000,\"maxDataPoints\":43200,\"refId\":\"C\",\"type\":\"threshold\"}" + } + + no_data_state = "NoData" + exec_err_state = "Error" + for = "1m" + annotations = {} + labels = {} + is_paused = false + + notification_settings { + contact_point = "email" + } + } +} diff --git a/pkg/services/ngalert/api/test-data/post-rulegroup-simplified-routing.json b/pkg/services/ngalert/api/test-data/post-rulegroup-simplified-routing.json new file mode 100644 index 00000000000..7cd163d4d35 --- /dev/null +++ b/pkg/services/ngalert/api/test-data/post-rulegroup-simplified-routing.json @@ -0,0 +1,94 @@ +{ + "name": "group_simplified_routing", + "interval": "10s", + "rules": [ + { + "grafana_alert": { + "title": "test", + "condition": "C", + "data": [ + { + "refId": "A", + "queryType": "", + "relativeTimeRange": { + "from": 600, + "to": 0 + }, + "datasourceUid": "grafanacloud-prom", + "model": { + "editorMode": "code", + "expr": "vector(1)", + "instant": true, + "intervalMs": 1000, + "legendFormat": "__auto", + "maxDataPoints": 43200, + "range": false, + "refId": "A" + } + }, + { + "refId": "C", + "queryType": "", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + 
"params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "A", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "C", + "type": "threshold" + } + } + ], + "is_paused": false, + "no_data_state": "NoData", + "exec_err_state": "Error", + "notification_settings": { + "receiver": "email" + }, + "metadata": { + "editor_settings": { + "simplified_query_and_expressions_section": false, + "simplified_notifications_section": true + } + }, + "missing_series_evals_to_resolve": 0, + "uid": "alert-with-simplified-routing" + }, + "annotations": {}, + "labels": {}, + "for": "1m", + "keep_firing_for": "0s" + } + ] +} diff --git a/pkg/services/ngalert/api/tooling/definitions/alertmanager.go b/pkg/services/ngalert/api/tooling/definitions/alertmanager.go index 5c33ad658fe..530264dc287 100644 --- a/pkg/services/ngalert/api/tooling/definitions/alertmanager.go +++ b/pkg/services/ngalert/api/tooling/definitions/alertmanager.go @@ -603,15 +603,15 @@ type AlertGroups = amv2.AlertGroups type AlertGroup = amv2.AlertGroup -type Receiver = alertingmodels.Receiver +type Receiver = alertingmodels.ReceiverStatus // swagger:response receiversResponse type ReceiversResponse struct { // in:body - Body []alertingmodels.Receiver + Body []alertingmodels.ReceiverStatus } -type Integration = alertingmodels.Integration +type Integration = alertingmodels.IntegrationStatus // swagger:parameters RouteGetAMAlerts RouteGetAMAlertGroups RouteGetGrafanaAMAlerts RouteGetGrafanaAMAlertGroups type AlertsParams struct { diff --git a/pkg/services/ngalert/api/tooling/definitions/provisioning_alert_rules.go b/pkg/services/ngalert/api/tooling/definitions/provisioning_alert_rules.go index 0d91080064b..cbfb7adfd24 100644 --- a/pkg/services/ngalert/api/tooling/definitions/provisioning_alert_rules.go +++ b/pkg/services/ngalert/api/tooling/definitions/provisioning_alert_rules.go @@ -306,13 +306,13 @@ type RelativeTimeRangeExport 
struct { type AlertRuleNotificationSettingsExport struct { // Field name mismatches with Terraform provider schema are noted where applicable. - Receiver string `yaml:"receiver,omitempty" json:"receiver,omitempty" hcl:"contact_point"` // TF -> `contact_point` - GroupBy []string `yaml:"group_by,omitempty" json:"group_by,omitempty" hcl:"group_by"` - GroupWait *string `yaml:"group_wait,omitempty" json:"group_wait,omitempty" hcl:"group_wait,optional"` - GroupInterval *string `yaml:"group_interval,omitempty" json:"group_interval,omitempty" hcl:"group_interval,optional"` - RepeatInterval *string `yaml:"repeat_interval,omitempty" json:"repeat_interval,omitempty" hcl:"repeat_interval,optional"` - MuteTimeIntervals []string `yaml:"mute_time_intervals,omitempty" json:"mute_time_intervals,omitempty" hcl:"mute_timings"` // TF -> `mute_timings` - ActiveTimeIntervals []string `yaml:"active_time_intervals,omitempty" json:"active_time_intervals,omitempty" hcl:"active_timings"` // TF -> `active_timings` + Receiver string `yaml:"receiver,omitempty" json:"receiver,omitempty" hcl:"contact_point"` // TF -> `contact_point` + GroupBy *[]string `yaml:"group_by,omitempty" json:"group_by,omitempty" hcl:"group_by,optional"` + GroupWait *string `yaml:"group_wait,omitempty" json:"group_wait,omitempty" hcl:"group_wait,optional"` + GroupInterval *string `yaml:"group_interval,omitempty" json:"group_interval,omitempty" hcl:"group_interval,optional"` + RepeatInterval *string `yaml:"repeat_interval,omitempty" json:"repeat_interval,omitempty" hcl:"repeat_interval,optional"` + MuteTimeIntervals *[]string `yaml:"mute_time_intervals,omitempty" json:"mute_time_intervals,omitempty" hcl:"mute_timings,optional"` // TF -> `mute_timings` + ActiveTimeIntervals *[]string `yaml:"active_time_intervals,omitempty" json:"active_time_intervals,omitempty" hcl:"active_timings,optional"` // TF -> `active_timings` } // Record is the provisioned export of models.Record. 
diff --git a/pkg/services/ngalert/models/notifications.go b/pkg/services/ngalert/models/notifications.go index 36e4df6ae0d..609ee724085 100644 --- a/pkg/services/ngalert/models/notifications.go +++ b/pkg/services/ngalert/models/notifications.go @@ -8,6 +8,7 @@ import ( "unsafe" "github.com/grafana/grafana-plugin-sdk-go/data" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/prometheus/common/model" ) @@ -102,12 +103,12 @@ func (s *NotificationSettings) Validate() error { // - AutogeneratedRouteLabel: "true" // - AutogeneratedRouteReceiverNameLabel: Receiver // - AutogeneratedRouteSettingsHashLabel: Fingerprint (if the NotificationSettings are not all default) -func (s *NotificationSettings) ToLabels() data.Labels { +func (s *NotificationSettings) ToLabels(features featuremgmt.FeatureToggles) data.Labels { result := make(data.Labels, 3) result[AutogeneratedRouteLabel] = "true" result[AutogeneratedRouteReceiverNameLabel] = s.Receiver if !s.IsAllDefault() { - result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint().String() + result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint(features).String() } return result } @@ -160,7 +161,7 @@ func NewDefaultNotificationSettings(receiver string) NotificationSettings { // Fingerprint calculates a hash value to uniquely identify a NotificationSettings by its attributes. // The hash is calculated by concatenating the strings and durations of the NotificationSettings attributes // and using an invalid UTF-8 sequence as a separator. -func (s *NotificationSettings) Fingerprint() data.Fingerprint { +func (s *NotificationSettings) Fingerprint(features featuremgmt.FeatureToggles) data.Fingerprint { h := fnv.New64() tmp := make([]byte, 8) @@ -192,7 +193,10 @@ func (s *NotificationSettings) Fingerprint() data.Fingerprint { } // Add a separator between the time intervals to avoid collisions // when all settings are the same including interval names except for the interval type (mute vs active). 
- _, _ = h.Write([]byte{255}) + // Use new algorithm by default, unless feature flag is explicitly disabled + if features == nil || (features != nil && features.IsEnabledGlobally(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm)) { + _, _ = h.Write([]byte{255}) + } for _, interval := range s.ActiveTimeIntervals { writeString(interval) } diff --git a/pkg/services/ngalert/models/notifications_test.go b/pkg/services/ngalert/models/notifications_test.go index 2e7df039895..1bacbe403e7 100644 --- a/pkg/services/ngalert/models/notifications_test.go +++ b/pkg/services/ngalert/models/notifications_test.go @@ -195,7 +195,7 @@ func TestNotificationSettingsLabels(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - labels := tt.notificationSettings.ToLabels() + labels := tt.notificationSettings.ToLabels(nil) require.Equal(t, tt.labels, labels) }) } @@ -219,7 +219,7 @@ func TestNotificationSettings_TimeIntervals(t *testing.T) { ActiveTimeIntervals: []string{timeInterval}, } - require.NotEqual(t, activeSettings.Fingerprint(), muteSettings.Fingerprint()) + require.NotEqual(t, activeSettings.Fingerprint(nil), muteSettings.Fingerprint(nil)) } func TestNormalizedGroupBy(t *testing.T) { diff --git a/pkg/services/ngalert/models/receivers.go b/pkg/services/ngalert/models/receivers.go index 1f892147938..08734b0f56b 100644 --- a/pkg/services/ngalert/models/receivers.go +++ b/pkg/services/ngalert/models/receivers.go @@ -12,6 +12,7 @@ import ( "sort" "strings" + "github.com/grafana/alerting/models" alertingNotify "github.com/grafana/alerting/notify" "github.com/grafana/alerting/receivers/schema" ) @@ -572,7 +573,7 @@ func (integration *Integration) Validate(decryptFn DecryptFn) error { return err } - return ValidateIntegration(context.Background(), alertingNotify.GrafanaIntegrationConfig{ + return ValidateIntegration(context.Background(), models.IntegrationConfig{ UID: decrypted.UID, Name: decrypted.Name, Type: decrypted.Config.Type, @@ -582,7 
+583,7 @@ func (integration *Integration) Validate(decryptFn DecryptFn) error { }, alertingNotify.NoopDecrypt) } -func ValidateIntegration(ctx context.Context, integration alertingNotify.GrafanaIntegrationConfig, decryptFunc alertingNotify.GetDecryptedValueFn) error { +func ValidateIntegration(ctx context.Context, integration models.IntegrationConfig, decryptFunc alertingNotify.GetDecryptedValueFn) error { if integration.Type == "" { return fmt.Errorf("type should not be an empty string") } @@ -591,8 +592,8 @@ func ValidateIntegration(ctx context.Context, integration alertingNotify.Grafana } _, err := alertingNotify.BuildReceiverConfiguration(ctx, &alertingNotify.APIReceiver{ - GrafanaIntegrations: alertingNotify.GrafanaIntegrations{ - Integrations: []*alertingNotify.GrafanaIntegrationConfig{&integration}, + ReceiverConfig: models.ReceiverConfig{ + Integrations: []*models.IntegrationConfig{&integration}, }, }, alertingNotify.DecodeSecretsFromBase64, decryptFunc) if err != nil { diff --git a/pkg/services/ngalert/models/receivers_test.go b/pkg/services/ngalert/models/receivers_test.go index 0c6c08c3d93..785fffd49be 100644 --- a/pkg/services/ngalert/models/receivers_test.go +++ b/pkg/services/ngalert/models/receivers_test.go @@ -6,6 +6,7 @@ import ( "testing" alertingNotify "github.com/grafana/alerting/notify" + "github.com/grafana/alerting/notify/notifytest" "github.com/grafana/alerting/receivers/schema" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -40,8 +41,7 @@ func TestReceiver_EncryptDecrypt(t *testing.T) { encryptFn := Base64Enrypt decryptnFn := Base64Decrypt // Test that all known integration types encrypt and decrypt their secrets. 
- for it := range alertingNotify.AllKnownConfigsForTesting { - integrationType := schema.IntegrationType(it) + for integrationType := range notifytest.AllKnownV1ConfigsForTesting { t.Run(string(integrationType), func(t *testing.T) { decrypedIntegration := IntegrationGen(IntegrationMuts.WithValidConfig(integrationType))() encrypted := decrypedIntegration.Clone() @@ -76,8 +76,7 @@ func TestIntegration_Redact(t *testing.T) { return "TESTREDACTED" } // Test that all known integration types redact their secrets. - for it := range alertingNotify.AllKnownConfigsForTesting { - integrationType := schema.IntegrationType(it) + for integrationType := range notifytest.AllKnownV1ConfigsForTesting { t.Run(string(integrationType), func(t *testing.T) { validIntegration := IntegrationGen(IntegrationMuts.WithValidConfig(integrationType))() @@ -106,8 +105,7 @@ func TestIntegration_Validate(t *testing.T) { testutil.SkipIntegrationTestInShortMode(t) // Test that all known integration types are valid. - for it := range alertingNotify.AllKnownConfigsForTesting { - integrationType := schema.IntegrationType(it) + for integrationType := range notifytest.AllKnownV1ConfigsForTesting { t.Run(string(integrationType), func(t *testing.T) { validIntegration := IntegrationGen(IntegrationMuts.WithValidConfig(integrationType))() assert.NoError(t, validIntegration.Encrypt(Base64Enrypt)) @@ -242,8 +240,7 @@ func TestIntegration_WithExistingSecureFields(t *testing.T) { func TestSecretsIntegrationConfig(t *testing.T) { // Test that all known integration types have a config and correctly mark their secrets as secure. 
- for it := range alertingNotify.AllKnownConfigsForTesting { - integrationType := schema.IntegrationType(it) + for integrationType := range notifytest.AllKnownV1ConfigsForTesting { t.Run(string(integrationType), func(t *testing.T) { schemaType, ok := alertingNotify.GetSchemaForIntegration(integrationType) require.True(t, ok) @@ -272,8 +269,8 @@ func TestSecretsIntegrationConfig(t *testing.T) { } t.Run("Unknown version returns error", func(t *testing.T) { - for s := range maps.Keys(alertingNotify.AllKnownConfigsForTesting) { - schemaType, _ := alertingNotify.GetSchemaForIntegration(schema.IntegrationType(s)) + for s := range maps.Keys(notifytest.AllKnownV1ConfigsForTesting) { + schemaType, _ := alertingNotify.GetSchemaForIntegration(s) _, err := IntegrationConfigFromSchema(schemaType, "unknown") require.Error(t, err) return @@ -285,8 +282,8 @@ func TestIntegration_SecureFields(t *testing.T) { testutil.SkipIntegrationTestInShortMode(t) // Test that all known integration types have a config and correctly mark their secrets as secure. 
- for it := range alertingNotify.AllKnownConfigsForTesting { - integrationType := schema.IntegrationType(it) + for it := range notifytest.AllKnownV1ConfigsForTesting { + integrationType := it t.Run(string(integrationType), func(t *testing.T) { t.Run("contains SecureSettings", func(t *testing.T) { validIntegration := IntegrationGen(IntegrationMuts.WithValidConfig(integrationType))() diff --git a/pkg/services/ngalert/models/testing.go b/pkg/services/ngalert/models/testing.go index 2164ca8f86f..0fd681b693c 100644 --- a/pkg/services/ngalert/models/testing.go +++ b/pkg/services/ngalert/models/testing.go @@ -13,6 +13,7 @@ import ( "github.com/go-openapi/strfmt" "github.com/google/uuid" alertingNotify "github.com/grafana/alerting/notify" + "github.com/grafana/alerting/notify/notifytest" "github.com/grafana/alerting/receivers/schema" "github.com/grafana/alerting/receivers/webex" "github.com/grafana/grafana-plugin-sdk-go/data" @@ -1270,7 +1271,7 @@ func CopyIntegrationWith(r Integration, mutators ...Mutator[Integration]) Integr func IntegrationGen(mutators ...Mutator[Integration]) func() Integration { return func() Integration { name := util.GenerateShortUID() - randomIntegrationType, _ := randomMapKey(alertingNotify.AllKnownConfigsForTesting) + randomIntegrationType, _ := randomMapKey(notifytest.AllKnownV1ConfigsForTesting) c := Integration{ UID: util.GenerateShortUID(), @@ -1280,7 +1281,7 @@ func IntegrationGen(mutators ...Mutator[Integration]) func() Integration { SecureSettings: make(map[string]string), } - IntegrationMuts.WithValidConfig(schema.IntegrationType(randomIntegrationType))(&c) + IntegrationMuts.WithValidConfig(randomIntegrationType)(&c) for _, mutator := range mutators { mutator(&c) @@ -1317,7 +1318,11 @@ func (n IntegrationMutators) WithName(name string) Mutator[Integration] { func (n IntegrationMutators) WithValidConfig(integrationType schema.IntegrationType) Mutator[Integration] { return func(c *Integration) { // TODO add support for v0 integrations - 
config := alertingNotify.AllKnownConfigsForTesting[string(integrationType)].GetRawNotifierConfig(c.Name) + ncfg, ok := notifytest.AllKnownV1ConfigsForTesting[integrationType] + if !ok { + panic(fmt.Sprintf("unknown integration type: %s", integrationType)) + } + config := ncfg.GetRawNotifierConfig(c.Name) typeSchema, _ := alertingNotify.GetSchemaForIntegration(integrationType) integrationConfig, _ := IntegrationConfigFromSchema(typeSchema, schema.V1) c.Config = integrationConfig @@ -1337,7 +1342,10 @@ func (n IntegrationMutators) WithValidConfig(integrationType schema.IntegrationT func (n IntegrationMutators) WithInvalidConfig(integrationType schema.IntegrationType) Mutator[Integration] { return func(c *Integration) { - typeSchema, _ := alertingNotify.GetSchemaForIntegration(integrationType) + typeSchema, ok := alertingNotify.GetSchemaForIntegration(integrationType) + if !ok { + panic(fmt.Sprintf("unknown integration type: %s", integrationType)) + } c.Config, _ = IntegrationConfigFromSchema(typeSchema, schema.V1) c.Settings = map[string]interface{}{} c.SecureSettings = map[string]string{} diff --git a/pkg/services/ngalert/ngalert.go b/pkg/services/ngalert/ngalert.go index 2f18840d50a..a014048a46a 100644 --- a/pkg/services/ngalert/ngalert.go +++ b/pkg/services/ngalert/ngalert.go @@ -220,7 +220,7 @@ func (ng *AlertNG) init() error { Timeout: ng.Cfg.UnifiedAlerting.RemoteAlertmanager.Timeout, } autogenFn := func(ctx context.Context, logger log.Logger, orgID int64, cfg *definitions.PostableApiAlertingConfig, skipInvalid bool) error { - return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid) + return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid, ng.FeatureToggles) } // This function will be used by the MOA to create new Alertmanagers. 
diff --git a/pkg/services/ngalert/notifier/alertmanager.go b/pkg/services/ngalert/notifier/alertmanager.go index 8c833c14a2b..f4454d87c5f 100644 --- a/pkg/services/ngalert/notifier/alertmanager.go +++ b/pkg/services/ngalert/notifier/alertmanager.go @@ -9,6 +9,7 @@ import ( "strconv" "time" + "github.com/grafana/alerting/models" alertingNotify "github.com/grafana/alerting/notify" "github.com/grafana/alerting/notify/nfstatus" "github.com/prometheus/alertmanager/config" @@ -56,6 +57,7 @@ type alertmanager struct { DefaultConfiguration string decryptFn alertingNotify.GetDecryptedValueFn crypto Crypto + features featuremgmt.FeatureToggles } // maintenanceOptions represent the options for components that need maintenance on a frequency within the Alertmanager. @@ -155,6 +157,7 @@ func NewAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A logger: l.New("component", "alertmanager", opts.TenantKey, opts.TenantID), // similar to what the base does decryptFn: decryptFn, crypto: crypto, + features: featureToggles, } return am, nil @@ -344,7 +347,7 @@ func (am *alertmanager) applyConfig(ctx context.Context, cfg *apimodels.Postable templates := alertingNotify.PostableAPITemplatesToTemplateDefinitions(cfg.GetMergedTemplateDefinitions()) // Now add autogenerated config to the route. 
- err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid) + err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid, am.features) if err != nil { return false, err } @@ -363,7 +366,7 @@ func (am *alertmanager) applyConfig(ctx context.Context, cfg *apimodels.Postable return false, nil } - receivers := PostableApiAlertingConfigToApiReceivers(amConfig) + receivers := alertingNotify.PostableAPIReceiversToAPIReceivers(amConfig.Receivers) for _, recv := range receivers { err = patchNewSecureFields(ctx, recv, alertingNotify.DecodeSecretsFromBase64, am.decryptFn) if err != nil { @@ -404,7 +407,7 @@ func patchNewSecureFields(ctx context.Context, api *alertingNotify.APIReceiver, return nil } -func patchSettingsFromSecureSettings(ctx context.Context, integration *alertingNotify.GrafanaIntegrationConfig, key string, decode alertingNotify.DecodeSecretsFn, decrypt alertingNotify.GetDecryptedValueFn) error { +func patchSettingsFromSecureSettings(ctx context.Context, integration *models.IntegrationConfig, key string, decode alertingNotify.DecodeSecretsFn, decrypt alertingNotify.GetDecryptedValueFn) error { if _, ok := integration.SecureSettings[key]; !ok { return nil } diff --git a/pkg/services/ngalert/notifier/alertmanager_config.go b/pkg/services/ngalert/notifier/alertmanager_config.go index 25a24cb5441..514da9be686 100644 --- a/pkg/services/ngalert/notifier/alertmanager_config.go +++ b/pkg/services/ngalert/notifier/alertmanager_config.go @@ -133,7 +133,7 @@ func (moa *MultiOrgAlertmanager) GetAlertmanagerConfiguration(ctx context.Contex // Otherwise, broken settings (e.g. a receiver that doesn't exist) will cause the config returned here to be // different than the config currently in-use. // TODO: Preferably, we'd be getting the config directly from the in-memory AM so adding the autogen config would not be necessary. 
- err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true) + err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true, moa.featureManager) if err != nil { return definitions.GettableUserConfig{}, err } @@ -324,6 +324,15 @@ func (moa *MultiOrgAlertmanager) SaveAndApplyAlertmanagerConfiguration(ctx conte } cleanPermissionsErr := err + if previousConfig != nil { + // If there is a previous configuration, we need to copy its extra configs to the new one. + extraConfigs, err := extractExtraConfigs(previousConfig.AlertmanagerConfiguration) + if err != nil { + return fmt.Errorf("failed to extract extra configs from previous configuration: %w", err) + } + config.ExtraConfigs = extraConfigs + } + if err := moa.Crypto.ProcessSecureSettings(ctx, org, config.AlertmanagerConfig.Receivers); err != nil { return fmt.Errorf("failed to post process Alertmanager configuration: %w", err) } @@ -572,3 +581,18 @@ func extractReceiverNames(rawConfig string) (sets.Set[string], error) { return receiverNames, nil } + +// extractExtraConfigs extracts encrypted (does not decrypt) extra configurations from the raw Alertmanager config. +func extractExtraConfigs(rawConfig string) ([]definitions.ExtraConfiguration, error) { + // Slimmed down version of the Alertmanager configuration to extract extra configs. 
+ type extraConfigUserConfig struct { + ExtraConfigs []definitions.ExtraConfiguration `yaml:"extra_config,omitempty" json:"extra_config,omitempty"` + } + + cfg := &extraConfigUserConfig{} + if err := json.Unmarshal([]byte(rawConfig), cfg); err != nil { + return nil, fmt.Errorf("unable to parse Alertmanager configuration: %w", err) + } + + return cfg.ExtraConfigs, nil +} diff --git a/pkg/services/ngalert/notifier/alertmanager_config_test.go b/pkg/services/ngalert/notifier/alertmanager_config_test.go index 3b12153da98..f4b06285db5 100644 --- a/pkg/services/ngalert/notifier/alertmanager_config_test.go +++ b/pkg/services/ngalert/notifier/alertmanager_config_test.go @@ -150,6 +150,264 @@ receivers: }) } +func TestMultiOrgAlertmanager_SaveAndApplyAlertmanagerConfiguration(t *testing.T) { + orgID := int64(1) + ctx := context.Background() + + t.Run("SaveAndApplyAlertmanagerConfiguration preserves existing extra configs", func(t *testing.T) { + mam := setupMam(t, nil) + require.NoError(t, mam.LoadAndSyncAlertmanagersForOrgs(ctx)) + + extraConfig := definitions.ExtraConfiguration{ + Identifier: "test-extra-config", + MergeMatchers: amconfig.Matchers{&labels.Matcher{Type: labels.MatchEqual, Name: "env", Value: "test"}}, + TemplateFiles: map[string]string{"test.tmpl": "{{ define \"test\" }}Test{{ end }}"}, + AlertmanagerConfig: `route: + receiver: extra-receiver +receivers: + - name: extra-receiver`, + } + + err := mam.SaveAndApplyExtraConfiguration(ctx, orgID, extraConfig) + require.NoError(t, err) + + // Verify extra config was saved + gettableConfig, err := mam.GetAlertmanagerConfiguration(ctx, orgID, false, false) + require.NoError(t, err) + require.Len(t, gettableConfig.ExtraConfigs, 1) + require.Equal(t, extraConfig.Identifier, gettableConfig.ExtraConfigs[0].Identifier) + + // Apply a new main configuration + newMainConfig := definitions.PostableUserConfig{ + AlertmanagerConfig: definitions.PostableApiAlertingConfig{ + Config: definitions.Config{ + Route: 
&definitions.Route{ + Receiver: "main-receiver", + }, + }, + Receivers: []*definitions.PostableApiReceiver{ + { + Receiver: amconfig.Receiver{ + Name: "main-receiver", + }, + PostableGrafanaReceivers: definitions.PostableGrafanaReceivers{ + GrafanaManagedReceivers: []*definitions.PostableGrafanaReceiver{ + { + Name: "main-receiver", + Type: "email", + Settings: definitions.RawMessage(`{"addresses": "me@grafana.com"}`), + }, + }, + }, + }, + }, + }, + } + + err = mam.SaveAndApplyAlertmanagerConfiguration(ctx, orgID, newMainConfig) + require.NoError(t, err) + + // Verify that the extra config is still present after applying the new main config + updatedConfig, err := mam.GetAlertmanagerConfiguration(ctx, orgID, false, false) + require.NoError(t, err) + require.Len(t, updatedConfig.ExtraConfigs, 1) + require.Equal(t, extraConfig.Identifier, updatedConfig.ExtraConfigs[0].Identifier) + require.Equal(t, extraConfig.TemplateFiles, updatedConfig.ExtraConfigs[0].TemplateFiles) + + // Verify the main config was updated + require.Equal(t, "main-receiver", updatedConfig.AlertmanagerConfig.Route.Receiver) + require.Len(t, updatedConfig.AlertmanagerConfig.Receivers, 1) + require.Equal(t, "main-receiver", updatedConfig.AlertmanagerConfig.Receivers[0].Name) + }) + + t.Run("SaveAndApplyAlertmanagerConfiguration handles missing extra_configs field", func(t *testing.T) { + mam := setupMam(t, nil) + require.NoError(t, mam.LoadAndSyncAlertmanagersForOrgs(ctx)) + + // Apply initial config without extra_configs field + initialConfig := definitions.PostableUserConfig{ + AlertmanagerConfig: definitions.PostableApiAlertingConfig{ + Config: definitions.Config{ + Route: &definitions.Route{ + Receiver: "initial-receiver", + }, + }, + Receivers: []*definitions.PostableApiReceiver{ + { + Receiver: amconfig.Receiver{ + Name: "initial-receiver", + }, + PostableGrafanaReceivers: definitions.PostableGrafanaReceivers{ + GrafanaManagedReceivers: []*definitions.PostableGrafanaReceiver{ + { + Name: 
"initial-receiver", + Type: "email", + Settings: definitions.RawMessage(`{"addresses": "initial@grafana.com"}`), + }, + }, + }, + }, + }, + }, + } + + err := mam.SaveAndApplyAlertmanagerConfiguration(ctx, orgID, initialConfig) + require.NoError(t, err) + + // Apply a new main configuration + newMainConfig := definitions.PostableUserConfig{ + AlertmanagerConfig: definitions.PostableApiAlertingConfig{ + Config: definitions.Config{ + Route: &definitions.Route{ + Receiver: "main-receiver", + }, + }, + Receivers: []*definitions.PostableApiReceiver{ + { + Receiver: amconfig.Receiver{ + Name: "main-receiver", + }, + PostableGrafanaReceivers: definitions.PostableGrafanaReceivers{ + GrafanaManagedReceivers: []*definitions.PostableGrafanaReceiver{ + { + Name: "main-receiver", + Type: "email", + Settings: definitions.RawMessage(`{"addresses": "me@grafana.com"}`), + }, + }, + }, + }, + }, + }, + } + + err = mam.SaveAndApplyAlertmanagerConfiguration(ctx, orgID, newMainConfig) + require.NoError(t, err) + + // Verify that no extra configs are present and main config was updated + updatedConfig, err := mam.GetAlertmanagerConfiguration(ctx, orgID, false, false) + require.NoError(t, err) + require.Len(t, updatedConfig.ExtraConfigs, 0) + require.Equal(t, "main-receiver", updatedConfig.AlertmanagerConfig.Route.Receiver) + }) + + t.Run("SaveAndApplyAlertmanagerConfiguration handles empty extra_configs array", func(t *testing.T) { + mam := setupMam(t, nil) + require.NoError(t, mam.LoadAndSyncAlertmanagersForOrgs(ctx)) + + // Apply initial config with empty extra_configs + initialConfig := definitions.PostableUserConfig{ + AlertmanagerConfig: definitions.PostableApiAlertingConfig{ + Config: definitions.Config{ + Route: &definitions.Route{ + Receiver: "initial-receiver", + }, + }, + Receivers: []*definitions.PostableApiReceiver{ + { + Receiver: amconfig.Receiver{ + Name: "initial-receiver", + }, + PostableGrafanaReceivers: definitions.PostableGrafanaReceivers{ + GrafanaManagedReceivers: 
[]*definitions.PostableGrafanaReceiver{ + { + Name: "initial-receiver", + Type: "email", + Settings: definitions.RawMessage(`{"addresses": "initial@grafana.com"}`), + }, + }, + }, + }, + }, + }, + ExtraConfigs: []definitions.ExtraConfiguration{}, // Empty array + } + + err := mam.SaveAndApplyAlertmanagerConfiguration(ctx, orgID, initialConfig) + require.NoError(t, err) + + // Apply a new main configuration + newMainConfig := definitions.PostableUserConfig{ + AlertmanagerConfig: definitions.PostableApiAlertingConfig{ + Config: definitions.Config{ + Route: &definitions.Route{ + Receiver: "main-receiver", + }, + }, + Receivers: []*definitions.PostableApiReceiver{ + { + Receiver: amconfig.Receiver{ + Name: "main-receiver", + }, + PostableGrafanaReceivers: definitions.PostableGrafanaReceivers{ + GrafanaManagedReceivers: []*definitions.PostableGrafanaReceiver{ + { + Name: "main-receiver", + Type: "email", + Settings: definitions.RawMessage(`{"addresses": "me@grafana.com"}`), + }, + }, + }, + }, + }, + }, + } + + err = mam.SaveAndApplyAlertmanagerConfiguration(ctx, orgID, newMainConfig) + require.NoError(t, err) + + // Verify that no extra configs are present and main config was updated + updatedConfig, err := mam.GetAlertmanagerConfiguration(ctx, orgID, false, false) + require.NoError(t, err) + require.Len(t, updatedConfig.ExtraConfigs, 0) + require.Equal(t, "main-receiver", updatedConfig.AlertmanagerConfig.Route.Receiver) + }) +} + +func TestExtractExtraConfigs(t *testing.T) { + t.Run("extracts extra configs from JSON", func(t *testing.T) { + jsonConfig := `{ + "extra_config": [ + { + "identifier": "test-config", + "merge_matchers": [], + "template_files": {"test.tmpl": "test"}, + "alertmanager_config": "route:\n receiver: test" + } + ] + }` + + extraConfigs, err := extractExtraConfigs(jsonConfig) + require.NoError(t, err) + require.Len(t, extraConfigs, 1) + require.Equal(t, "test-config", extraConfigs[0].Identifier) + }) + + t.Run("handles missing extra_config field", 
func(t *testing.T) { + jsonConfig := `{"alertmanager_config": {"route": {"receiver": "test"}}}` + + extraConfigs, err := extractExtraConfigs(jsonConfig) + require.NoError(t, err) + require.Len(t, extraConfigs, 0) + }) + + t.Run("handles empty extra_config array", func(t *testing.T) { + jsonConfig := `{"extra_config": []}` + + extraConfigs, err := extractExtraConfigs(jsonConfig) + require.NoError(t, err) + require.Len(t, extraConfigs, 0) + }) + + t.Run("handles null extra_config", func(t *testing.T) { + jsonConfig := `{"extra_config": null}` + + extraConfigs, err := extractExtraConfigs(jsonConfig) + require.NoError(t, err) + require.Len(t, extraConfigs, 0) + }) +} + func TestMultiOrgAlertmanager_DeleteExtraConfiguration(t *testing.T) { orgID := int64(1) diff --git a/pkg/services/ngalert/notifier/alertmanager_mock/Alertmanager.go b/pkg/services/ngalert/notifier/alertmanager_mock/Alertmanager.go index 20f4e14d0ab..e4ec918e59d 100644 --- a/pkg/services/ngalert/notifier/alertmanager_mock/Alertmanager.go +++ b/pkg/services/ngalert/notifier/alertmanager_mock/Alertmanager.go @@ -309,23 +309,23 @@ func (_c *AlertmanagerMock_GetAlerts_Call) RunAndReturn(run func(context.Context } // GetReceivers provides a mock function with given fields: ctx -func (_m *AlertmanagerMock) GetReceivers(ctx context.Context) ([]alertingmodels.Receiver, error) { +func (_m *AlertmanagerMock) GetReceivers(ctx context.Context) ([]alertingmodels.ReceiverStatus, error) { ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for GetReceivers") } - var r0 []alertingmodels.Receiver + var r0 []alertingmodels.ReceiverStatus var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]alertingmodels.Receiver, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) ([]alertingmodels.ReceiverStatus, error)); ok { return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context) []alertingmodels.Receiver); ok { + if rf, ok := ret.Get(0).(func(context.Context) 
[]alertingmodels.ReceiverStatus); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]alertingmodels.Receiver) + r0 = ret.Get(0).([]alertingmodels.ReceiverStatus) } } @@ -356,12 +356,12 @@ func (_c *AlertmanagerMock_GetReceivers_Call) Run(run func(ctx context.Context)) return _c } -func (_c *AlertmanagerMock_GetReceivers_Call) Return(_a0 []alertingmodels.Receiver, _a1 error) *AlertmanagerMock_GetReceivers_Call { +func (_c *AlertmanagerMock_GetReceivers_Call) Return(_a0 []alertingmodels.ReceiverStatus, _a1 error) *AlertmanagerMock_GetReceivers_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *AlertmanagerMock_GetReceivers_Call) RunAndReturn(run func(context.Context) ([]alertingmodels.Receiver, error)) *AlertmanagerMock_GetReceivers_Call { +func (_c *AlertmanagerMock_GetReceivers_Call) RunAndReturn(run func(context.Context) ([]alertingmodels.ReceiverStatus, error)) *AlertmanagerMock_GetReceivers_Call { _c.Call.Return(run) return _c } diff --git a/pkg/services/ngalert/notifier/autogen_alertmanager.go b/pkg/services/ngalert/notifier/autogen_alertmanager.go index 55f0201fcb3..7523e368ad5 100644 --- a/pkg/services/ngalert/notifier/autogen_alertmanager.go +++ b/pkg/services/ngalert/notifier/autogen_alertmanager.go @@ -12,6 +12,7 @@ import ( "golang.org/x/exp/maps" "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" ) @@ -22,8 +23,8 @@ type autogenRuleStore interface { // AddAutogenConfig creates the autogenerated configuration and adds it to the given apiAlertingConfig. // If skipInvalid is true, then invalid notification settings are skipped, otherwise an error is returned. 
-func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) error { - autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid) +func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) error { + autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid, features) if err != nil { return err } @@ -39,7 +40,7 @@ func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store // newAutogeneratedRoute creates a new autogenerated route based on the notification settings for the given org. // cfg is used to construct the settings validator and to ensure we create a dedicated route for each receiver. // skipInvalid is used to skip invalid settings instead of returning an error. -func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) (autogeneratedRoute, error) { +func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) (autogeneratedRoute, error) { settings, err := store.ListNotificationSettings(ctx, models.ListNotificationSettingsQuery{OrgID: orgId}) if err != nil { return autogeneratedRoute{}, fmt.Errorf("failed to list alert rules: %w", err) @@ -50,7 +51,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s // contact point even if no rules are using it. This will prevent race conditions between AM sync and rule sync. 
for _, receiver := range cfg.GetReceivers() { setting := models.NewDefaultNotificationSettings(receiver.GetName()) - fp := setting.Fingerprint() + fp := setting.Fingerprint(features) notificationSettings[fp] = setting } @@ -65,7 +66,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s } return autogeneratedRoute{}, fmt.Errorf("invalid notification settings for rule %s: %w", ruleKey.UID, err) } - fp := setting.Fingerprint() + fp := setting.Fingerprint(features) // Keep only unique settings. if _, ok := notificationSettings[fp]; ok { continue diff --git a/pkg/services/ngalert/notifier/autogen_alertmanager_test.go b/pkg/services/ngalert/notifier/autogen_alertmanager_test.go index 78239c06479..bf376502caa 100644 --- a/pkg/services/ngalert/notifier/autogen_alertmanager_test.go +++ b/pkg/services/ngalert/notifier/autogen_alertmanager_test.go @@ -290,7 +290,7 @@ func TestAddAutogenConfig(t *testing.T) { store.notificationSettings[orgId][models.AlertRuleKey{OrgID: orgId, UID: util.GenerateShortUID()}] = []models.NotificationSettings{setting} } - err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid) + err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid, nil) if tt.expErrorContains != "" { require.Error(t, err) require.ErrorContains(t, err, tt.expErrorContains) diff --git a/pkg/services/ngalert/notifier/compat.go b/pkg/services/ngalert/notifier/compat.go index 9b2dccb8099..63c6ed7d7bb 100644 --- a/pkg/services/ngalert/notifier/compat.go +++ b/pkg/services/ngalert/notifier/compat.go @@ -1,47 +1,11 @@ package notifier import ( - "encoding/json" - alertingNotify "github.com/grafana/alerting/notify" - apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" ) -func PostableGrafanaReceiverToGrafanaIntegrationConfig(p 
*apimodels.PostableGrafanaReceiver) *alertingNotify.GrafanaIntegrationConfig { - return &alertingNotify.GrafanaIntegrationConfig{ - UID: p.UID, - Name: p.Name, - Type: p.Type, - DisableResolveMessage: p.DisableResolveMessage, - Settings: json.RawMessage(p.Settings), - SecureSettings: p.SecureSettings, - } -} - -func PostableApiReceiverToApiReceiver(r *apimodels.PostableApiReceiver) *alertingNotify.APIReceiver { - integrations := alertingNotify.GrafanaIntegrations{ - Integrations: make([]*alertingNotify.GrafanaIntegrationConfig, 0, len(r.GrafanaManagedReceivers)), - } - for _, cfg := range r.GrafanaManagedReceivers { - integrations.Integrations = append(integrations.Integrations, PostableGrafanaReceiverToGrafanaIntegrationConfig(cfg)) - } - - return &alertingNotify.APIReceiver{ - ConfigReceiver: r.Receiver, - GrafanaIntegrations: integrations, - } -} - -func PostableApiAlertingConfigToApiReceivers(c apimodels.PostableApiAlertingConfig) []*alertingNotify.APIReceiver { - apiReceivers := make([]*alertingNotify.APIReceiver, 0, len(c.Receivers)) - for _, receiver := range c.Receivers { - apiReceivers = append(apiReceivers, PostableApiReceiverToApiReceiver(receiver)) - } - return apiReceivers -} - // Silence-specific compat functions to convert between grafana/alerting and model types. 
func GettableSilenceToSilence(s alertingNotify.GettableSilence) *models.Silence { diff --git a/pkg/services/ngalert/notifier/compat_test.go b/pkg/services/ngalert/notifier/compat_test.go deleted file mode 100644 index b3a2ff8508a..00000000000 --- a/pkg/services/ngalert/notifier/compat_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package notifier - -import ( - "encoding/json" - "testing" - - alertingNotify "github.com/grafana/alerting/notify" - "github.com/prometheus/alertmanager/config" - "github.com/stretchr/testify/require" - - apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" -) - -func TestPostableGrafanaReceiverToGrafanaIntegrationConfig(t *testing.T) { - r := &apimodels.PostableGrafanaReceiver{ - UID: "test-uid", - Name: "test-name", - Type: "slack", - DisableResolveMessage: false, - Settings: apimodels.RawMessage(`{ "data" : "test" }`), - SecureSettings: map[string]string{ - "test": "data", - }, - } - actual := PostableGrafanaReceiverToGrafanaIntegrationConfig(r) - require.Equal(t, alertingNotify.GrafanaIntegrationConfig{ - UID: "test-uid", - Name: "test-name", - Type: "slack", - DisableResolveMessage: false, - Settings: json.RawMessage(`{ "data" : "test" }`), - SecureSettings: map[string]string{ - "test": "data", - }, - }, *actual) -} - -func TestPostableApiReceiverToApiReceiver(t *testing.T) { - t.Run("returns empty when no receivers", func(t *testing.T) { - r := &apimodels.PostableApiReceiver{ - Receiver: config.Receiver{ - Name: "test-receiver", - }, - } - actual := PostableApiReceiverToApiReceiver(r) - require.Empty(t, actual.Integrations) - require.Equal(t, r.Receiver, actual.ConfigReceiver) - }) - t.Run("converts receivers", func(t *testing.T) { - r := &apimodels.PostableApiReceiver{ - Receiver: config.Receiver{ - Name: "test-receiver", - }, - PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{ - GrafanaManagedReceivers: []*apimodels.PostableGrafanaReceiver{ - { - UID: "test-uid", - Name: "test-name", - Type: 
"slack", - DisableResolveMessage: false, - Settings: apimodels.RawMessage(`{ "data" : "test" }`), - SecureSettings: map[string]string{ - "test": "data", - }, - }, - { - UID: "test-uid2", - Name: "test-name2", - Type: "webhook", - DisableResolveMessage: false, - Settings: apimodels.RawMessage(`{ "data2" : "test2" }`), - SecureSettings: map[string]string{ - "test2": "data2", - }, - }, - }, - }, - } - actual := PostableApiReceiverToApiReceiver(r) - require.Len(t, actual.Integrations, 2) - require.Equal(t, r.Receiver, actual.ConfigReceiver) - require.Equal(t, *PostableGrafanaReceiverToGrafanaIntegrationConfig(r.GrafanaManagedReceivers[0]), *actual.Integrations[0]) - require.Equal(t, *PostableGrafanaReceiverToGrafanaIntegrationConfig(r.GrafanaManagedReceivers[1]), *actual.Integrations[1]) - }) -} - -func TestPostableApiAlertingConfigToApiReceivers(t *testing.T) { - t.Run("returns empty when no receivers", func(t *testing.T) { - r := apimodels.PostableApiAlertingConfig{ - Config: apimodels.Config{}, - } - actual := PostableApiAlertingConfigToApiReceivers(r) - require.Empty(t, actual) - }) - c := apimodels.PostableApiAlertingConfig{ - Config: apimodels.Config{}, - Receivers: []*apimodels.PostableApiReceiver{ - { - Receiver: config.Receiver{ - Name: "test-receiver", - }, - PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{ - GrafanaManagedReceivers: []*apimodels.PostableGrafanaReceiver{ - { - UID: "test-uid", - Name: "test-name", - Type: "slack", - DisableResolveMessage: false, - Settings: apimodels.RawMessage(`{ "data" : "test" }`), - SecureSettings: map[string]string{ - "test": "data", - }, - }, - }, - }, - }, - { - Receiver: config.Receiver{ - Name: "test-receiver2", - }, - PostableGrafanaReceivers: apimodels.PostableGrafanaReceivers{ - GrafanaManagedReceivers: []*apimodels.PostableGrafanaReceiver{ - { - UID: "test-uid2", - Name: "test-name1", - Type: "slack", - DisableResolveMessage: false, - Settings: apimodels.RawMessage(`{ "data" : "test" }`), - 
SecureSettings: map[string]string{ - "test": "data", - }, - }, - }, - }, - }, - }, - } - actual := PostableApiAlertingConfigToApiReceivers(c) - - require.Len(t, actual, 2) - require.Equal(t, PostableApiReceiverToApiReceiver(c.Receivers[0]), actual[0]) - require.Equal(t, PostableApiReceiverToApiReceiver(c.Receivers[1]), actual[1]) -} diff --git a/pkg/services/ngalert/notifier/legacy_storage/receivers_test.go b/pkg/services/ngalert/notifier/legacy_storage/receivers_test.go index 72ce37763aa..a020b833de8 100644 --- a/pkg/services/ngalert/notifier/legacy_storage/receivers_test.go +++ b/pkg/services/ngalert/notifier/legacy_storage/receivers_test.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/alerting/definition" "github.com/grafana/alerting/notify" + "github.com/grafana/alerting/notify/notifytest" "github.com/grafana/alerting/receivers/schema" "github.com/grafana/alerting/receivers/webhook" "github.com/prometheus/alertmanager/config" @@ -92,7 +93,7 @@ func TestDeleteReceiver(t *testing.T) { } func TestCreateReceiver(t *testing.T) { - rawCfg := notify.AllKnownConfigsForTesting[string(webhook.Type)] + rawCfg := notifytest.AllKnownV1ConfigsForTesting[webhook.Type] typeSchema, _ := notify.GetSchemaForIntegration(webhook.Type) cfgSchema, err := models.IntegrationConfigFromSchema(typeSchema, schema.V1) require.NoError(t, err) @@ -199,7 +200,7 @@ func TestCreateReceiver(t *testing.T) { } func TestUpdateReceiver(t *testing.T) { - rawCfg := notify.AllKnownConfigsForTesting[string(webhook.Type)] + rawCfg := notifytest.AllKnownV1ConfigsForTesting[webhook.Type] typeSchema, _ := notify.GetSchemaForIntegration(webhook.Type) cfgSchema, err := models.IntegrationConfigFromSchema(typeSchema, schema.V1) require.NoError(t, err) @@ -300,7 +301,7 @@ func TestUpdateReceiver(t *testing.T) { } func TestGetReceiver(t *testing.T) { - rawCfg := notify.AllKnownConfigsForTesting[string(webhook.Type)] + rawCfg := notifytest.AllKnownV1ConfigsForTesting[webhook.Type] typeSchema, _ := 
notify.GetSchemaForIntegration(webhook.Type) cfgSchema, err := models.IntegrationConfigFromSchema(typeSchema, schema.V1) require.NoError(t, err) @@ -491,7 +492,7 @@ func getConfigRevisionForTest() *ConfigRevision { { UID: "integration-uid-1", Type: "webhook", - Settings: definitions.RawMessage(notify.AllKnownConfigsForTesting["webhook"].Config), + Settings: definitions.RawMessage(notifytest.AllKnownV1ConfigsForTesting["webhook"].Config), }, }, }, @@ -503,7 +504,7 @@ func getConfigRevisionForTest() *ConfigRevision { { UID: "integration-uid-2", Type: "webhook", - Settings: definitions.RawMessage(notify.AllKnownConfigsForTesting["webhook"].Config), + Settings: definitions.RawMessage(notifytest.AllKnownV1ConfigsForTesting["webhook"].Config), }, }, }, @@ -515,7 +516,7 @@ func getConfigRevisionForTest() *ConfigRevision { { UID: "integration-uid-3", Type: "email", - Settings: definitions.RawMessage(notify.AllKnownConfigsForTesting["email"].Config), + Settings: definitions.RawMessage(notifytest.AllKnownV1ConfigsForTesting["email"].Config), }, }, }, diff --git a/pkg/services/ngalert/notifier/receiver_svc_test.go b/pkg/services/ngalert/notifier/receiver_svc_test.go index 5cf9146bb1c..8816124334d 100644 --- a/pkg/services/ngalert/notifier/receiver_svc_test.go +++ b/pkg/services/ngalert/notifier/receiver_svc_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/grafana/alerting/receivers/line" "github.com/prometheus/alertmanager/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -352,7 +353,7 @@ func TestReceiverService_Create(t *testing.T) { slackIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), models.IntegrationMuts.WithValidConfig("slack"))() emailIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), models.IntegrationMuts.WithValidConfig("email"))() - lineIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), 
models.IntegrationMuts.WithValidConfig("line"))() + lineIntegration := models.IntegrationGen(models.IntegrationMuts.WithName("test receiver"), models.IntegrationMuts.WithValidConfig(line.Type))() baseReceiver := models.ReceiverGen(models.ReceiverMuts.WithName("test receiver"), models.ReceiverMuts.WithIntegrations(slackIntegration))() for _, tc := range []struct { diff --git a/pkg/services/ngalert/notifier/testreceivers.go b/pkg/services/ngalert/notifier/testreceivers.go index 187caab480b..41bd2a181c7 100644 --- a/pkg/services/ngalert/notifier/testreceivers.go +++ b/pkg/services/ngalert/notifier/testreceivers.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" + "github.com/grafana/alerting/models" alertingNotify "github.com/grafana/alerting/notify" v2 "github.com/prometheus/alertmanager/api/v2" @@ -13,9 +14,9 @@ import ( func (am *alertmanager) TestReceivers(ctx context.Context, c apimodels.TestReceiversConfigBodyParams) (*alertingNotify.TestReceiversResult, int, error) { receivers := make([]*alertingNotify.APIReceiver, 0, len(c.Receivers)) for _, r := range c.Receivers { - integrations := make([]*alertingNotify.GrafanaIntegrationConfig, 0, len(r.GrafanaManagedReceivers)) + integrations := make([]*models.IntegrationConfig, 0, len(r.GrafanaManagedReceivers)) for _, gr := range r.GrafanaManagedReceivers { - integrations = append(integrations, &alertingNotify.GrafanaIntegrationConfig{ + integrations = append(integrations, &models.IntegrationConfig{ UID: gr.UID, Name: gr.Name, Type: gr.Type, @@ -26,7 +27,7 @@ func (am *alertmanager) TestReceivers(ctx context.Context, c apimodels.TestRecei } recv := &alertingNotify.APIReceiver{ ConfigReceiver: r.Receiver, - GrafanaIntegrations: alertingNotify.GrafanaIntegrations{ + ReceiverConfig: models.ReceiverConfig{ Integrations: integrations, }, } @@ -52,5 +53,5 @@ func (am *alertmanager) TestReceivers(ctx context.Context, c apimodels.TestRecei } func (am *alertmanager) GetReceivers(_ context.Context) ([]apimodels.Receiver, 
error) { - return am.Base.GetReceivers(), nil + return am.Base.GetReceiversStatus(), nil } diff --git a/pkg/services/ngalert/notifier/testreceivers_test.go b/pkg/services/ngalert/notifier/testreceivers_test.go index 6ee01b9b9fe..b367a8500ea 100644 --- a/pkg/services/ngalert/notifier/testreceivers_test.go +++ b/pkg/services/ngalert/notifier/testreceivers_test.go @@ -6,13 +6,14 @@ import ( "net/url" "testing" + "github.com/grafana/alerting/models" alertingNotify "github.com/grafana/alerting/notify" "github.com/stretchr/testify/require" ) func TestInvalidReceiverError_Error(t *testing.T) { e := alertingNotify.IntegrationValidationError{ - Integration: &alertingNotify.GrafanaIntegrationConfig{ + Integration: &models.IntegrationConfig{ Name: "test", Type: "test-type", UID: "uid", @@ -24,7 +25,7 @@ func TestInvalidReceiverError_Error(t *testing.T) { func TestReceiverTimeoutError_Error(t *testing.T) { e := alertingNotify.IntegrationTimeoutError{ - Integration: &alertingNotify.GrafanaIntegrationConfig{ + Integration: &models.IntegrationConfig{ Name: "test", UID: "uid", }, @@ -45,7 +46,7 @@ func (e timeoutError) Timeout() bool { func TestProcessNotifierError(t *testing.T) { t.Run("assert ReceiverTimeoutError is returned for context deadline exceeded", func(t *testing.T) { - r := &alertingNotify.GrafanaIntegrationConfig{ + r := &models.IntegrationConfig{ Name: "test", UID: "uid", } @@ -56,7 +57,7 @@ func TestProcessNotifierError(t *testing.T) { }) t.Run("assert ReceiverTimeoutError is returned for *url.Error timeout", func(t *testing.T) { - r := &alertingNotify.GrafanaIntegrationConfig{ + r := &models.IntegrationConfig{ Name: "test", UID: "uid", } @@ -72,7 +73,7 @@ func TestProcessNotifierError(t *testing.T) { }) t.Run("assert unknown error is returned unmodified", func(t *testing.T) { - r := &alertingNotify.GrafanaIntegrationConfig{ + r := &models.IntegrationConfig{ Name: "test", UID: "uid", } diff --git a/pkg/services/ngalert/provisioning/compat.go 
b/pkg/services/ngalert/provisioning/compat.go index 13d0bba88a9..a4b3ea50922 100644 --- a/pkg/services/ngalert/provisioning/compat.go +++ b/pkg/services/ngalert/provisioning/compat.go @@ -3,19 +3,19 @@ package provisioning import ( "strings" - alertingNotify "github.com/grafana/alerting/notify" + alertingModels "github.com/grafana/alerting/models" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/ngalert/models" ) -func EmbeddedContactPointToGrafanaIntegrationConfig(e definitions.EmbeddedContactPoint) (alertingNotify.GrafanaIntegrationConfig, error) { +func EmbeddedContactPointToGrafanaIntegrationConfig(e definitions.EmbeddedContactPoint) (alertingModels.IntegrationConfig, error) { data, err := e.Settings.MarshalJSON() if err != nil { - return alertingNotify.GrafanaIntegrationConfig{}, err + return alertingModels.IntegrationConfig{}, err } - return alertingNotify.GrafanaIntegrationConfig{ + return alertingModels.IntegrationConfig{ UID: e.UID, Name: e.Name, Type: e.Type, diff --git a/pkg/services/ngalert/provisioning/contactpoints_test.go b/pkg/services/ngalert/provisioning/contactpoints_test.go index b85dfcd3108..1a281f912b6 100644 --- a/pkg/services/ngalert/provisioning/contactpoints_test.go +++ b/pkg/services/ngalert/provisioning/contactpoints_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/grafana/alerting/notify" + "github.com/grafana/alerting/notify/notifytest" "github.com/grafana/alerting/receivers/schema" "github.com/prometheus/alertmanager/config" "github.com/stretchr/testify/assert" @@ -425,7 +426,7 @@ func TestIntegrationContactPointServiceDecryptRedact(t *testing.T) { } func TestRemoveSecretsForContactPoint(t *testing.T) { - overrides := map[string]func(settings map[string]any){ + overrides := map[schema.IntegrationType]func(settings map[string]any){ "webhook": func(settings map[string]any) { // add additional field to the 
settings because valid config does not allow it to be specified along with password settings["authorization_credentials"] = "test-authz-creds" }, @@ -437,23 +438,23 @@ func TestRemoveSecretsForContactPoint(t *testing.T) { }, } - configs := notify.AllKnownConfigsForTesting + configs := notifytest.AllKnownV1ConfigsForTesting keys := maps.Keys(configs) slices.Sort(keys) for _, integrationType := range keys { - integration := models.IntegrationGen(models.IntegrationMuts.WithValidConfig(schema.IntegrationType(integrationType)))() + integration := models.IntegrationGen(models.IntegrationMuts.WithValidConfig(integrationType))() if f, ok := overrides[integrationType]; ok { f(integration.Settings) } settingsRaw, err := json.Marshal(integration.Settings) require.NoError(t, err) - typeSchema, _ := notify.GetSchemaVersionForIntegration(schema.IntegrationType(integrationType), schema.V1) + typeSchema, _ := notify.GetSchemaVersionForIntegration(integrationType, schema.V1) expectedFields := typeSchema.GetSecretFieldsPaths() - t.Run(integrationType, func(t *testing.T) { + t.Run(string(integrationType), func(t *testing.T) { cp := definitions.EmbeddedContactPoint{ - Name: "integration-" + integrationType, - Type: integrationType, + Name: "integration-" + string(integrationType), + Type: string(integrationType), Settings: simplejson.MustJson(settingsRaw), } secureFields, err := RemoveSecretsForContactPoint(&cp) diff --git a/pkg/services/ngalert/remote/alertmanager.go b/pkg/services/ngalert/remote/alertmanager.go index 7048810195f..8cde9ee198f 100644 --- a/pkg/services/ngalert/remote/alertmanager.go +++ b/pkg/services/ngalert/remote/alertmanager.go @@ -609,10 +609,7 @@ func (am *Alertmanager) TestReceivers(ctx context.Context, c apimodels.TestRecei return nil, 0, fmt.Errorf("failed to decrypt receivers: %w", err) } - apiReceivers := make([]*alertingNotify.APIReceiver, 0, len(c.Receivers)) - for _, r := range decryptedReceivers { - apiReceivers = append(apiReceivers, 
notifier.PostableApiReceiverToApiReceiver(r)) - } + apiReceivers := alertingNotify.PostableAPIReceiversToAPIReceivers(decryptedReceivers) var alert *alertingNotify.TestReceiversConfigAlertParams if c.Alert != nil { alert = &alertingNotify.TestReceiversConfigAlertParams{Annotations: c.Alert.Annotations, Labels: c.Alert.Labels} diff --git a/pkg/services/ngalert/remote/mock/remoteAlertmanager.go b/pkg/services/ngalert/remote/mock/remoteAlertmanager.go index 1199b65ae9c..8f8e506c765 100644 --- a/pkg/services/ngalert/remote/mock/remoteAlertmanager.go +++ b/pkg/services/ngalert/remote/mock/remoteAlertmanager.go @@ -358,23 +358,23 @@ func (_c *RemoteAlertmanagerMock_GetAlerts_Call) RunAndReturn(run func(context.C } // GetReceivers provides a mock function with given fields: ctx -func (_m *RemoteAlertmanagerMock) GetReceivers(ctx context.Context) ([]alertingmodels.Receiver, error) { +func (_m *RemoteAlertmanagerMock) GetReceivers(ctx context.Context) ([]alertingmodels.ReceiverStatus, error) { ret := _m.Called(ctx) if len(ret) == 0 { panic("no return value specified for GetReceivers") } - var r0 []alertingmodels.Receiver + var r0 []alertingmodels.ReceiverStatus var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]alertingmodels.Receiver, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context) ([]alertingmodels.ReceiverStatus, error)); ok { return rf(ctx) } - if rf, ok := ret.Get(0).(func(context.Context) []alertingmodels.Receiver); ok { + if rf, ok := ret.Get(0).(func(context.Context) []alertingmodels.ReceiverStatus); ok { r0 = rf(ctx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]alertingmodels.Receiver) + r0 = ret.Get(0).([]alertingmodels.ReceiverStatus) } } @@ -405,12 +405,12 @@ func (_c *RemoteAlertmanagerMock_GetReceivers_Call) Run(run func(ctx context.Con return _c } -func (_c *RemoteAlertmanagerMock_GetReceivers_Call) Return(_a0 []alertingmodels.Receiver, _a1 error) *RemoteAlertmanagerMock_GetReceivers_Call { +func (_c 
*RemoteAlertmanagerMock_GetReceivers_Call) Return(_a0 []alertingmodels.ReceiverStatus, _a1 error) *RemoteAlertmanagerMock_GetReceivers_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *RemoteAlertmanagerMock_GetReceivers_Call) RunAndReturn(run func(context.Context) ([]alertingmodels.Receiver, error)) *RemoteAlertmanagerMock_GetReceivers_Call { +func (_c *RemoteAlertmanagerMock_GetReceivers_Call) RunAndReturn(run func(context.Context) ([]alertingmodels.ReceiverStatus, error)) *RemoteAlertmanagerMock_GetReceivers_Call { _c.Call.Return(run) return _c } diff --git a/pkg/services/ngalert/schedule/alert_rule.go b/pkg/services/ngalert/schedule/alert_rule.go index 523dcf0d86c..3991df89e77 100644 --- a/pkg/services/ngalert/schedule/alert_rule.go +++ b/pkg/services/ngalert/schedule/alert_rule.go @@ -471,7 +471,7 @@ func (a *alertRule) evaluate(ctx context.Context, e *Evaluation, span trace.Span e.scheduledAt, e.rule, results, - state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder), + state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder, a.featureToggles), func(ctx context.Context, statesToSend state.StateTransitions) { start := a.clock.Now() alerts := a.send(ctx, logger, statesToSend) diff --git a/pkg/services/ngalert/schedule/alert_rule_test.go b/pkg/services/ngalert/schedule/alert_rule_test.go index 5e301c5aa08..48be6bb4a40 100644 --- a/pkg/services/ngalert/schedule/alert_rule_test.go +++ b/pkg/services/ngalert/schedule/alert_rule_test.go @@ -1317,7 +1317,7 @@ func stateForRule(rule *models.AlertRule, ts time.Time, evalState eval.State) *s for k, v := range rule.Labels { s.Labels[k] = v } - for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true) { + for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true, featuremgmt.WithFeatures()) { if _, ok := s.Labels[k]; !ok { s.Labels[k] = v } diff --git a/pkg/services/ngalert/schedule/registry.go 
b/pkg/services/ngalert/schedule/registry.go index 240c62bdc40..ede71c94d75 100644 --- a/pkg/services/ngalert/schedule/registry.go +++ b/pkg/services/ngalert/schedule/registry.go @@ -304,7 +304,7 @@ func (r ruleWithFolder) Fingerprint() fingerprint { } for _, setting := range rule.NotificationSettings { - binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint())) + binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint(nil))) writeBytes(tmp) } diff --git a/pkg/services/ngalert/state/state.go b/pkg/services/ngalert/state/state.go index db807953b13..9df04681353 100644 --- a/pkg/services/ngalert/state/state.go +++ b/pkg/services/ngalert/state/state.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/grafana/pkg/apimachinery/errutil" "github.com/grafana/grafana/pkg/expr" "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/ngalert/eval" "github.com/grafana/grafana/pkg/services/ngalert/models" "github.com/grafana/grafana/pkg/services/screenshot" @@ -753,7 +754,7 @@ func ParseFormattedState(stateStr string) (eval.State, string, error) { } // GetRuleExtraLabels returns a map of built-in labels that should be added to an alert before it is sent to the Alertmanager or its state is cached. -func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool) map[string]string { +func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool, features featuremgmt.FeatureToggles) map[string]string { extraLabels := make(map[string]string, 4) extraLabels[alertingModels.NamespaceUIDLabel] = rule.NamespaceUID @@ -771,7 +772,7 @@ func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string ignored, _ := json.Marshal(rule.NotificationSettings[1:]) l.Error("Detected multiple notification settings, which is not supported. 
Only the first will be applied", "ignored_settings", string(ignored)) } - return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels()) + return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels(features)) } return extraLabels } diff --git a/pkg/services/ngalert/state/state_test.go b/pkg/services/ngalert/state/state_test.go index 8daf82f9a66..acffa24e2cc 100644 --- a/pkg/services/ngalert/state/state_test.go +++ b/pkg/services/ngalert/state/state_test.go @@ -779,7 +779,7 @@ func TestGetRuleExtraLabels(t *testing.T) { models.RuleUIDLabel: rule.UID, ngmodels.AutogeneratedRouteLabel: "true", ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver, - ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(), + ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(), }, }, "ignore_multiple_notifications": { @@ -794,14 +794,14 @@ func TestGetRuleExtraLabels(t *testing.T) { models.RuleUIDLabel: rule.UID, ngmodels.AutogeneratedRouteLabel: "true", ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver, - ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(), + ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(), }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder) + result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder, nil) require.Equal(t, tc.expected, result) }) } diff --git a/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go b/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go index feec2e53db0..eac01f6c636 100644 --- a/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go +++ b/pkg/services/pluginsintegration/plugincontext/plugincontext_test.go @@ -41,8 +41,10 @@ func TestGet(t *testing.T) { cfg := setting.NewCfg() ds := &fakeDatasources.FakeDataSourceService{} db := &dbtest.FakeDB{ExpectedError: 
pluginsettings.ErrPluginSettingNotFound} + store, err := pluginstore.NewPluginStoreForTest(preg, &pluginFakes.FakeLoader{}, &pluginFakes.FakeSourceRegistry{}) + require.NoError(t, err) pcp := plugincontext.ProvideService(cfg, localcache.ProvideService(), - pluginstore.New(preg, &pluginFakes.FakeLoader{}), &fakeDatasources.FakeCacheService{}, + store, &fakeDatasources.FakeCacheService{}, ds, pluginSettings.ProvideService(db, secretstest.NewFakeSecretsService()), pluginconfig.NewFakePluginRequestConfigProvider(), ) identity := &user.SignedInUser{OrgID: int64(1), Login: "admin"} diff --git a/pkg/services/pluginsintegration/plugininstaller/service.go b/pkg/services/pluginsintegration/plugininstaller/service.go index 543d354c425..7232703820c 100644 --- a/pkg/services/pluginsintegration/plugininstaller/service.go +++ b/pkg/services/pluginsintegration/plugininstaller/service.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "github.com/grafana/dskit/services" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/plugins/repo" @@ -18,6 +19,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +const ServiceName = "plugin.backgroundinstaller" + var ( installRequestCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "plugins", @@ -36,6 +39,7 @@ var ( ) type Service struct { + services.NamedService cfg *setting.Cfg log log.Logger pluginInstaller plugins.Installer @@ -43,6 +47,7 @@ type Service struct { pluginRepo repo.Service features featuremgmt.FeatureToggles updateChecker pluginchecker.PluginUpdateChecker + installComplete chan struct{} // closed when all plugins are installed (used for testing) } func ProvideService( @@ -60,21 +65,18 @@ func ProvideService( }) s := &Service{ - log: log.New("plugin.backgroundinstaller"), + log: log.New(ServiceName), cfg: cfg, pluginInstaller: pluginInstaller, pluginStore: pluginStore, pluginRepo: pluginRepo, features: features, updateChecker: 
updateChecker, + installComplete: make(chan struct{}), } - if len(cfg.PreinstallPluginsSync) > 0 { - // Block initialization process until plugins are installed - err := s.installPluginsWithTimeout(cfg.PreinstallPluginsSync) - if err != nil { - return nil, err - } - } + + s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName) + return s, nil } @@ -83,24 +85,6 @@ func (s *Service) IsDisabled() bool { return len(s.cfg.PreinstallPluginsAsync) == 0 } -func (s *Service) installPluginsWithTimeout(pluginsToInstall []setting.InstallPlugin) error { - // Installation process does not timeout by default nor reuses the context - // passed to the request so we need to handle the timeout here. - // We could make this timeout configurable in the future. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - done := make(chan struct{ err error }) - go func() { - done <- struct{ err error }{err: s.installPlugins(ctx, pluginsToInstall, true)} - }() - select { - case <-ctx.Done(): - return fmt.Errorf("failed to install plugins: %w", ctx.Err()) - case d := <-done: - return d.err - } -} - func (s *Service) shouldUpdate(ctx context.Context, pluginID, currentVersion string, pluginURL string) bool { // If the plugin is installed from a URL, we cannot check for updates as we do not have the version information // from the repository. Therefore, we assume that the plugin should be updated if the URL is provided. 
@@ -166,11 +150,34 @@ func (s *Service) installPlugins(ctx context.Context, pluginsToInstall []setting return nil } -func (s *Service) Run(ctx context.Context) error { - err := s.installPlugins(ctx, s.cfg.PreinstallPluginsAsync, false) - if err != nil { - // Unexpected error, asynchronous installation should not return errors - s.log.Error("Failed to install plugins", "error", err) +func (s *Service) starting(ctx context.Context) error { + if len(s.cfg.PreinstallPluginsSync) > 0 { + s.log.Info("Installing plugins", "plugins", s.cfg.PreinstallPluginsSync) + if err := s.installPlugins(ctx, s.cfg.PreinstallPluginsSync, true); err != nil { + s.log.Error("Failed to install plugins", "error", err) + return err + } } + s.log.Info("Plugins installed", "plugins", s.cfg.PreinstallPluginsSync) return nil } + +func (s *Service) running(ctx context.Context) error { + if len(s.cfg.PreinstallPluginsAsync) > 0 { + s.log.Info("Installing plugins", "plugins", s.cfg.PreinstallPluginsAsync) + if err := s.installPlugins(ctx, s.cfg.PreinstallPluginsAsync, false); err != nil { + s.log.Error("Failed to install plugins", "error", err) + return err + } + } + close(s.installComplete) + <-ctx.Done() + return nil +} + +func (s *Service) Run(ctx context.Context) error { + if err := s.StartAsync(ctx); err != nil { + return err + } + return s.AwaitTerminated(ctx) +} diff --git a/pkg/services/pluginsintegration/plugininstaller/service_test.go b/pkg/services/pluginsintegration/plugininstaller/service_test.go index 1d6c3059eac..7a846933db2 100644 --- a/pkg/services/pluginsintegration/plugininstaller/service_test.go +++ b/pkg/services/pluginsintegration/plugininstaller/service_test.go @@ -26,7 +26,7 @@ func TestService_IsDisabled(t *testing.T) { &setting.Cfg{ PreinstallPluginsAsync: []setting.InstallPlugin{{ID: "myplugin"}}, }, - pluginstore.New(registry.NewInMemory(), &fakes.FakeLoader{}), + pluginstore.New(registry.NewInMemory(), &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}), 
&fakes.FakePluginInstaller{}, prometheus.NewRegistry(), &fakes.FakePluginRepo{}, @@ -160,12 +160,14 @@ func TestService_Run(t *testing.T) { } installed := 0 installedFromURL := 0 + store, err := pluginstore.NewPluginStoreForTest(preg, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) s, err := ProvideService( &setting.Cfg{ PreinstallPluginsAsync: tt.pluginsToInstall, PreinstallPluginsSync: tt.pluginsToInstallSync, }, - pluginstore.New(preg, &fakes.FakeLoader{}), + store, &fakes.FakePluginInstaller{ AddFunc: func(ctx context.Context, pluginID string, version string, opts plugins.AddOpts) error { for _, plugin := range tt.pluginsToFail { @@ -203,13 +205,26 @@ func TestService_Run(t *testing.T) { &pluginchecker.FakePluginPreinstall{}, ), ) + require.NoError(t, err) + + t.Cleanup(func() { + s.StopAsync() + err := s.AwaitTerminated(context.Background()) + if tt.shouldThrowError { + require.ErrorContains(t, err, "Failed to install plugin") + return + } + require.NoError(t, err) + }) + + err = s.StartAsync(context.Background()) + require.NoError(t, err) + err = s.AwaitRunning(context.Background()) if tt.shouldThrowError { require.ErrorContains(t, err, "Failed to install plugin") return } require.NoError(t, err) - err = s.Run(context.Background()) - require.NoError(t, err) if tt.shouldInstall { expectedInstalled := 0 @@ -232,6 +247,7 @@ func TestService_Run(t *testing.T) { expectedInstalled++ } } + <-s.installComplete require.Equal(t, expectedInstalled, installed) require.Equal(t, expectedInstalledFromURL, installedFromURL) } diff --git a/pkg/services/pluginsintegration/pluginstore/store.go b/pkg/services/pluginsintegration/pluginstore/store.go index 20ef15379cf..f486218040e 100644 --- a/pkg/services/pluginsintegration/pluginstore/store.go +++ b/pkg/services/pluginsintegration/pluginstore/store.go @@ -3,18 +3,21 @@ package pluginstore import ( "context" "sort" - "sync" "time" + "github.com/grafana/dskit/services" 
"github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/plugins/manager/loader" "github.com/grafana/grafana/pkg/plugins/manager/registry" "github.com/grafana/grafana/pkg/plugins/manager/sources" + "golang.org/x/sync/errgroup" ) var _ Store = (*Service)(nil) +const ServiceName = "plugins.store" + // Store is the publicly accessible storage for plugins. type Store interface { // Plugin finds a plugin by its ID. @@ -25,47 +28,81 @@ type Store interface { } type Service struct { + services.NamedService + pluginRegistry registry.Service pluginLoader loader.Service + pluginSources sources.Registry } func ProvideService(pluginRegistry registry.Service, pluginSources sources.Registry, - pluginLoader loader.Service) (*Service, error) { - ctx := context.Background() + pluginLoader loader.Service) *Service { + return New(pluginRegistry, pluginLoader, pluginSources) +} + +func (s *Service) Run(ctx context.Context) error { + if err := s.StartAsync(ctx); err != nil { + return err + } + stopCtx := context.Background() + return s.AwaitTerminated(stopCtx) +} + +func NewPluginStoreForTest(pluginRegistry registry.Service, pluginLoader loader.Service, pluginSources sources.Registry) (*Service, error) { + s := New(pluginRegistry, pluginLoader, pluginSources) + if err := s.StartAsync(context.Background()); err != nil { + return nil, err + } + if err := s.AwaitRunning(context.Background()); err != nil { + return nil, err + } + return s, nil +} + +func New(pluginRegistry registry.Service, pluginLoader loader.Service, pluginSources sources.Registry) *Service { + s := &Service{ + pluginRegistry: pluginRegistry, + pluginLoader: pluginLoader, + pluginSources: pluginSources, + } + s.NamedService = services.NewBasicService(s.starting, s.running, s.stopping).WithName(ServiceName) + return s +} + +func (s *Service) starting(ctx context.Context) error { start := time.Now() totalPlugins := 0 - logger := log.New("plugin.store") + logger := 
log.New(ServiceName) logger.Info("Loading plugins...") - for _, ps := range pluginSources.List(ctx) { - loadedPlugins, err := pluginLoader.Load(ctx, ps) + for _, ps := range s.pluginSources.List(ctx) { + loadedPlugins, err := s.pluginLoader.Load(ctx, ps) if err != nil { logger.Error("Loading plugin source failed", "source", ps.PluginClass(ctx), "error", err) - return nil, err + return err } - totalPlugins += len(loadedPlugins) } logger.Info("Plugins loaded", "count", totalPlugins, "duration", time.Since(start)) - return New(pluginRegistry, pluginLoader), nil + return nil } -func (s *Service) Run(ctx context.Context) error { +func (s *Service) running(ctx context.Context) error { <-ctx.Done() - s.shutdown(ctx) - return ctx.Err() + return nil } -func New(pluginRegistry registry.Service, pluginLoader loader.Service) *Service { - return &Service{ - pluginRegistry: pluginRegistry, - pluginLoader: pluginLoader, - } +func (s *Service) stopping(failureReason error) error { + return s.shutdown(context.Background()) } func (s *Service) Plugin(ctx context.Context, pluginID string) (Plugin, bool) { + if err := s.AwaitRunning(ctx); err != nil { + log.New(ServiceName).FromContext(ctx).Error("Failed to get plugin", "error", err) + return Plugin{}, false + } p, exists := s.plugin(ctx, pluginID) if !exists { return Plugin{}, false @@ -75,6 +112,10 @@ func (s *Service) Plugin(ctx context.Context, pluginID string) (Plugin, bool) { } func (s *Service) Plugins(ctx context.Context, pluginTypes ...plugins.Type) []Plugin { + if err := s.AwaitRunning(ctx); err != nil { + log.New(ServiceName).FromContext(ctx).Error("Failed to get plugins", "error", err) + return []Plugin{} + } // if no types passed, assume all if len(pluginTypes) == 0 { pluginTypes = plugins.PluginTypes @@ -125,6 +166,10 @@ func (s *Service) availablePlugins(ctx context.Context) []*plugins.Plugin { } func (s *Service) Routes(ctx context.Context) []*plugins.StaticRoute { + if err := s.AwaitRunning(ctx); err != nil { + 
log.New(ServiceName).FromContext(ctx).Error("Failed to get routes", "error", err) + return []*plugins.StaticRoute{} + } staticRoutes := make([]*plugins.StaticRoute, 0) for _, p := range s.availablePlugins(ctx) { @@ -135,18 +180,20 @@ func (s *Service) Routes(ctx context.Context) []*plugins.StaticRoute { return staticRoutes } -func (s *Service) shutdown(ctx context.Context) { - var wg sync.WaitGroup - for _, plugin := range s.pluginRegistry.Plugins(ctx) { - wg.Add(1) - go func(ctx context.Context, p *plugins.Plugin) { - defer wg.Done() - p.Logger().Debug("Stopping plugin") - if _, err := s.pluginLoader.Unload(ctx, p); err != nil { - p.Logger().Error("Failed to stop plugin", "error", err) +func (s *Service) shutdown(ctx context.Context) error { + var errgroup errgroup.Group + plugins := s.pluginRegistry.Plugins(ctx) + for _, p := range plugins { + plugin := p // capture loop variable + errgroup.Go(func() error { + plugin.Logger().Debug("Stopping plugin") + if _, err := s.pluginLoader.Unload(ctx, plugin); err != nil { + plugin.Logger().Error("Failed to stop plugin", "error", err) + return err } - p.Logger().Debug("Plugin stopped") - }(ctx, plugin) + plugin.Logger().Debug("Plugin stopped") + return nil + }) } - wg.Wait() + return errgroup.Wait() } diff --git a/pkg/services/pluginsintegration/pluginstore/store_test.go b/pkg/services/pluginsintegration/pluginstore/store_test.go index b0c0b408cf9..e195fe5e932 100644 --- a/pkg/services/pluginsintegration/pluginstore/store_test.go +++ b/pkg/services/pluginsintegration/pluginstore/store_test.go @@ -2,7 +2,7 @@ package pluginstore import ( "context" - "sync" + "errors" "testing" "github.com/stretchr/testify/require" @@ -43,7 +43,11 @@ func TestStore_ProvideService(t *testing.T) { } }} - _, err := ProvideService(fakes.NewFakePluginRegistry(), srcs, l) + service := ProvideService(fakes.NewFakePluginRegistry(), srcs, l) + ctx := context.Background() + err := service.StartAsync(ctx) + require.NoError(t, err) + err = 
service.AwaitRunning(ctx) require.NoError(t, err) require.Equal(t, []plugins.Class{"1", "2", "3"}, loadedSrcs) }) @@ -55,12 +59,13 @@ func TestStore_Plugin(t *testing.T) { p1.RegisterClient(&DecommissionedPlugin{}) p2 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-panel"}} - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) p, exists := ps.Plugin(context.Background(), p1.ID) require.False(t, exists) @@ -81,7 +86,7 @@ func TestStore_Plugins(t *testing.T) { p5 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "e-test-panel", Type: plugins.TypePanel}} p5.RegisterClient(&DecommissionedPlugin{}) - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, @@ -89,7 +94,8 @@ func TestStore_Plugins(t *testing.T) { p4.ID: p4, p5.ID: p5, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) ToGrafanaDTO(p1) pss := ps.Plugins(context.Background()) @@ -124,7 +130,7 @@ func TestStore_Routes(t *testing.T) { p6 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "f-test-app", Type: plugins.TypeApp}} p6.RegisterClient(&DecommissionedPlugin{}) - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, @@ -132,7 +138,8 @@ func TestStore_Routes(t *testing.T) { p5.ID: p5, p6.ID: p6, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) sr := func(p *plugins.Plugin) *plugins.StaticRoute { return &plugins.StaticRoute{PluginID: p.ID, Directory: p.FS.Base()} @@ -144,39 +151,62 @@ func TestStore_Routes(t *testing.T) { } func TestProcessManager_shutdown(t *testing.T) { 
- p := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-datasource", Type: plugins.TypeDataSource}} // Backend: true - backend := &fakes.FakeBackendPlugin{} - p.RegisterClient(backend) - p.SetLogger(log.NewTestLogger()) + t.Run("When context is cancelled the plugin is stopped", func(t *testing.T) { + p := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-datasource", Type: plugins.TypeDataSource}} // Backend: true + backend := &fakes.FakeBackendPlugin{} + p.RegisterClient(backend) + p.SetLogger(log.NewTestLogger()) - unloaded := false - ps := New(&fakes.FakePluginRegistry{ - Store: map[string]*plugins.Plugin{ - p.ID: p, - }, - }, &fakes.FakeLoader{ - UnloadFunc: func(_ context.Context, plugin *plugins.Plugin) (*plugins.Plugin, error) { - require.Equal(t, p, plugin) - unloaded = true - return nil, nil - }, + unloaded := false + ps := New(&fakes.FakePluginRegistry{ + Store: map[string]*plugins.Plugin{ + p.ID: p, + }, + }, &fakes.FakeLoader{ + UnloadFunc: func(_ context.Context, plugin *plugins.Plugin) (*plugins.Plugin, error) { + require.Equal(t, p, plugin) + unloaded = true + return nil, nil + }, + }, &fakes.FakeSourceRegistry{}) + + ctx, cancel := context.WithCancel(context.Background()) + + err := ps.StartAsync(ctx) + require.NoError(t, err) + err = ps.AwaitRunning(ctx) + require.NoError(t, err) + + // Cancel context to trigger shutdown + cancel() + + // Wait for service to be fully terminated + err = ps.AwaitTerminated(context.Background()) + require.NoError(t, err) + require.True(t, unloaded) }) - pCtx := context.Background() - cCtx, cancel := context.WithCancel(pCtx) - var wgRun sync.WaitGroup - wgRun.Add(1) - var runErr error - go func() { - runErr = ps.Run(cCtx) - wgRun.Done() - }() + t.Run("When shutdown fails, stopping method returns error", func(t *testing.T) { + p := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-datasource", Type: plugins.TypeDataSource}} + backend := &fakes.FakeBackendPlugin{} + p.RegisterClient(backend) + 
p.SetLogger(log.NewTestLogger()) - t.Run("When context is cancelled the plugin is stopped", func(t *testing.T) { - cancel() - wgRun.Wait() - require.ErrorIs(t, runErr, context.Canceled) - require.True(t, unloaded) + expectedErr := errors.New("unload failed") + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ + Store: map[string]*plugins.Plugin{ + p.ID: p, + }, + }, &fakes.FakeLoader{ + UnloadFunc: func(_ context.Context, plugin *plugins.Plugin) (*plugins.Plugin, error) { + return nil, expectedErr + }, + }, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) + + err = ps.stopping(nil) + require.Error(t, err) + require.ErrorIs(t, err, expectedErr) }) } @@ -186,12 +216,13 @@ func TestStore_availablePlugins(t *testing.T) { p1.RegisterClient(&DecommissionedPlugin{}) p2 := &plugins.Plugin{JSONData: plugins.JSONData{ID: "test-app"}} - ps := New(&fakes.FakePluginRegistry{ + ps, err := NewPluginStoreForTest(&fakes.FakePluginRegistry{ Store: map[string]*plugins.Plugin{ p1.ID: p1, p2.ID: p2, }, - }, &fakes.FakeLoader{}) + }, &fakes.FakeLoader{}, &fakes.FakeSourceRegistry{}) + require.NoError(t, err) aps := ps.availablePlugins(context.Background()) require.Len(t, aps, 1) diff --git a/pkg/services/pluginsintegration/test_helper.go b/pkg/services/pluginsintegration/test_helper.go index 8fec1925eb3..6689430c43b 100644 --- a/pkg/services/pluginsintegration/test_helper.go +++ b/pkg/services/pluginsintegration/test_helper.go @@ -67,7 +67,7 @@ func CreateIntegrationTestCtx(t *testing.T, cfg *setting.Cfg, coreRegistry *core Terminator: term, }) - ps, err := pluginstore.ProvideService(reg, sources.ProvideService(cfg, pCfg), l) + ps, err := pluginstore.NewPluginStoreForTest(reg, l, sources.ProvideService(cfg, pCfg)) require.NoError(t, err) return &IntegrationTestCtx{ diff --git a/pkg/services/preference/model.go b/pkg/services/preference/model.go index edcde2f9fb2..26fdc11ac02 100644 --- a/pkg/services/preference/model.go +++ b/pkg/services/preference/model.go @@ -74,6 
+74,12 @@ type SavePreferenceCommand struct { Navbar *NavbarPreference `json:"navbar,omitempty"` } +// One (and only one) of the values must be non-zero +type DeleteCommand struct { + OrgID int64 + UserID int64 + TeamID int64 +} type PatchPreferenceCommand struct { UserID int64 OrgID int64 diff --git a/pkg/services/preference/pref.go b/pkg/services/preference/pref.go index 1da5345d16a..6c6697d442a 100644 --- a/pkg/services/preference/pref.go +++ b/pkg/services/preference/pref.go @@ -10,5 +10,5 @@ type Service interface { Save(context.Context, *SavePreferenceCommand) error Patch(context.Context, *PatchPreferenceCommand) error GetDefaults() *Preference - DeleteByUser(context.Context, int64) error + Delete(context.Context, *DeleteCommand) error } diff --git a/pkg/services/preference/prefimpl/inmemory_test.go b/pkg/services/preference/prefimpl/inmemory_test.go index 63625cdb397..d24d230e0b7 100644 --- a/pkg/services/preference/prefimpl/inmemory_test.go +++ b/pkg/services/preference/prefimpl/inmemory_test.go @@ -121,6 +121,6 @@ func (s *inmemStore) Update(ctx context.Context, preference *pref.Preference) er return nil } -func (s *inmemStore) DeleteByUser(ctx context.Context, userID int64) error { +func (s *inmemStore) Delete(context.Context, *pref.DeleteCommand) error { panic("not yet implemented") } diff --git a/pkg/services/preference/prefimpl/pref.go b/pkg/services/preference/prefimpl/pref.go index 392f0617048..429feedfa79 100644 --- a/pkg/services/preference/prefimpl/pref.go +++ b/pkg/services/preference/prefimpl/pref.go @@ -272,8 +272,8 @@ func (s *Service) GetDefaults() *pref.Preference { } } -func (s *Service) DeleteByUser(ctx context.Context, userID int64) error { - return s.store.DeleteByUser(ctx, userID) +func (s *Service) Delete(ctx context.Context, cmd *pref.DeleteCommand) error { + return s.store.Delete(ctx, cmd) } func parseCookiePreferences(prefs []pref.CookieType) (map[string]struct{}, error) { diff --git a/pkg/services/preference/prefimpl/store.go 
b/pkg/services/preference/prefimpl/store.go index 7c8575a8a07..192f92cfb04 100644 --- a/pkg/services/preference/prefimpl/store.go +++ b/pkg/services/preference/prefimpl/store.go @@ -12,5 +12,5 @@ type store interface { // Insert adds a new preference and returns its sequential ID Insert(context.Context, *pref.Preference) (int64, error) Update(context.Context, *pref.Preference) error - DeleteByUser(context.Context, int64) error + Delete(context.Context, *pref.DeleteCommand) error } diff --git a/pkg/services/preference/prefimpl/store_test.go b/pkg/services/preference/prefimpl/store_test.go index 40015e80425..4bb971029cc 100644 --- a/pkg/services/preference/prefimpl/store_test.go +++ b/pkg/services/preference/prefimpl/store_test.go @@ -185,9 +185,10 @@ func testIntegrationPreferencesDataAccess(t *testing.T, fn getStore) { require.NoError(t, err) }) t.Run("delete preference by user", func(t *testing.T) { - err := prefStore.DeleteByUser(context.Background(), user.SignedInUser{}.UserID) + userId := int64(1) + err := prefStore.Delete(context.Background(), &pref.DeleteCommand{UserID: userId}) require.NoError(t, err) - query := &pref.Preference{OrgID: 0, UserID: user.SignedInUser{}.UserID, TeamID: 0} + query := &pref.Preference{OrgID: 0, UserID: userId, TeamID: 0} _, err = prefStore.Get(context.Background(), query) require.EqualError(t, err, pref.ErrPrefNotFound.Error()) }) diff --git a/pkg/services/preference/prefimpl/xorm_store.go b/pkg/services/preference/prefimpl/xorm_store.go index 286abd57885..1b5b2535829 100644 --- a/pkg/services/preference/prefimpl/xorm_store.go +++ b/pkg/services/preference/prefimpl/xorm_store.go @@ -2,6 +2,7 @@ package prefimpl import ( "context" + "fmt" "strings" "github.com/grafana/grafana/pkg/infra/db" @@ -80,10 +81,27 @@ func (s *sqlStore) Insert(ctx context.Context, cmd *pref.Preference) (int64, err return ID, err } -func (s *sqlStore) DeleteByUser(ctx context.Context, userID int64) error { - return s.db.WithDbSession(ctx, func(dbSession 
*db.Session) error { - var rawSQL = "DELETE FROM preferences WHERE user_id = ?" - _, err := dbSession.Exec(rawSQL, userID) - return err - }) +func (s *sqlStore) Delete(ctx context.Context, cmd *pref.DeleteCommand) error { + if cmd.UserID > 0 { + return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { + var rawSQL = "DELETE FROM preferences WHERE user_id = ?" + _, err := dbSession.Exec(rawSQL, cmd.UserID) + return err + }) + } + if cmd.TeamID > 0 { + return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { + var rawSQL = "DELETE FROM preferences WHERE team_id = ?" + _, err := dbSession.Exec(rawSQL, cmd.TeamID) + return err + }) + } + if cmd.OrgID > 0 { + return s.db.WithDbSession(ctx, func(dbSession *db.Session) error { + var rawSQL = "DELETE FROM preferences WHERE org_id = ? AND user_id=0 AND team_id=0" + _, err := dbSession.Exec(rawSQL, cmd.OrgID) + return err + }) + } + return fmt.Errorf("expecting one of team, org, user to be non-zero") } diff --git a/pkg/services/preference/preftest/fake.go b/pkg/services/preference/preftest/fake.go index 8c6d7e07708..c8cf6b0be19 100644 --- a/pkg/services/preference/preftest/fake.go +++ b/pkg/services/preference/preftest/fake.go @@ -35,6 +35,6 @@ func (f *FakePreferenceService) Patch(ctx context.Context, cmd *pref.PatchPrefer return f.ExpectedError } -func (f *FakePreferenceService) DeleteByUser(context.Context, int64) error { +func (f *FakePreferenceService) Delete(context.Context, *pref.DeleteCommand) error { return f.ExpectedError } diff --git a/pkg/services/provisioning/alerting/rules_types.go b/pkg/services/provisioning/alerting/rules_types.go index b5b28072beb..bac091ababa 100644 --- a/pkg/services/provisioning/alerting/rules_types.go +++ b/pkg/services/provisioning/alerting/rules_types.go @@ -151,10 +151,6 @@ func (rule *AlertRuleV1) mapToModel(orgID int64) (models.AlertRule, error) { noDataState = models.NoData } alertRule.NoDataState = noDataState - alertRule.Condition = rule.Condition.Value() - if 
alertRule.Condition == "" { - return models.AlertRule{}, fmt.Errorf("rule '%s' failed to parse: no condition set", alertRule.Title) - } alertRule.Annotations = rule.Annotations.Raw alertRule.Labels = rule.Labels.Value() for _, queryV1 := range rule.Data { @@ -182,6 +178,10 @@ func (rule *AlertRuleV1) mapToModel(orgID int64) (models.AlertRule, error) { } alertRule.Record = &record } + alertRule.Condition = rule.Condition.Value() + if alertRule.Condition == "" && alertRule.Record == nil { + return models.AlertRule{}, fmt.Errorf("rule '%s' failed to parse: no condition set", alertRule.Title) + } return alertRule, nil } diff --git a/pkg/services/provisioning/alerting/rules_types_test.go b/pkg/services/provisioning/alerting/rules_types_test.go index 16c510707b9..9c4e819ff73 100644 --- a/pkg/services/provisioning/alerting/rules_types_test.go +++ b/pkg/services/provisioning/alerting/rules_types_test.go @@ -202,6 +202,14 @@ func TestRules(t *testing.T) { }) } +func TestRecordingRules(t *testing.T) { + t.Run("a valid rule should not error", func(t *testing.T) { + rule := validRecordingRuleV1(t) + _, err := rule.mapToModel(1) + require.NoError(t, err) + }) +} + func TestNotificationsSettingsV1MapToModel(t *testing.T) { tests := []struct { name string @@ -347,6 +355,37 @@ func validRuleV1(t *testing.T) AlertRuleV1 { } } +func validRecordingRuleV1(t *testing.T) AlertRuleV1 { + t.Helper() + var ( + title values.StringValue + uid values.StringValue + forDuration values.StringValue + metric values.StringValue + from values.StringValue + ) + err := yaml.Unmarshal([]byte("test"), &title) + require.NoError(t, err) + err = yaml.Unmarshal([]byte("test_uid"), &uid) + require.NoError(t, err) + err = yaml.Unmarshal([]byte("10s"), &forDuration) + require.NoError(t, err) + err = yaml.Unmarshal([]byte("test_metric"), &metric) + require.NoError(t, err) + err = yaml.Unmarshal([]byte("A"), &from) + require.NoError(t, err) + return AlertRuleV1{ + Title: title, + UID: uid, + For: forDuration, + 
Record: &RecordV1{ + Metric: metric, + From: from, + }, + Data: []QueryV1{{}}, + } +} + func stringToStringValue(s string) values.StringValue { result := values.StringValue{} err := yaml.Unmarshal([]byte(s), &result) diff --git a/pkg/services/provisioning/provisioning.go b/pkg/services/provisioning/provisioning.go index b40b6136a46..e7e8c3c5fa7 100644 --- a/pkg/services/provisioning/provisioning.go +++ b/pkg/services/provisioning/provisioning.go @@ -7,6 +7,7 @@ import ( "path/filepath" "sync" + "github.com/grafana/dskit/services" "github.com/grafana/grafana/pkg/infra/db" "github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/tracing" @@ -39,6 +40,8 @@ import ( "github.com/grafana/grafana/pkg/storage/legacysql/dualwrite" ) +const ServiceName = "provisioning" + func ProvideService( ac accesscontrol.AccessControl, cfg *setting.Cfg, @@ -91,6 +94,8 @@ func ProvideService( dual: dual, } + s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName) + if err := s.setDashboardProvisioner(); err != nil { return nil, err } @@ -98,6 +103,67 @@ func ProvideService( return s, nil } +func (ps *ProvisioningServiceImpl) starting(ctx context.Context) error { + if err := ps.ProvisionDatasources(ctx); err != nil { + ps.log.Error("Failed to provision data sources", "error", err) + return err + } + + if err := ps.ProvisionPlugins(ctx); err != nil { + ps.log.Error("Failed to provision plugins", "error", err) + return err + } + + if err := ps.ProvisionAlerting(ctx); err != nil { + ps.log.Error("Failed to provision alerting", "error", err) + return err + } + + // Migrating prom types relies on data source provisioning to already be completed + // If we can make services depend on other services completing first, + // then we should remove this from provisioning + if err := ps.migratePrometheusType(ctx); err != nil { + ps.log.Error("Failed to migrate Prometheus type", "error", err) + return err + } + + if err := 
ps.ProvisionDashboards(ctx); err != nil { + ps.log.Error("Failed to provision dashboard", "error", err) + // Consider the allow list of errors for which running the provisioning service should not + // fail. For now this includes only dashboards.ErrGetOrCreateFolder. + if !errors.Is(err, dashboards.ErrGetOrCreateFolder) { + return err + } + } + if ps.dashboardProvisioner.HasDashboardSources() { + ps.searchService.TriggerReIndex() + } + return nil +} + +func (ps *ProvisioningServiceImpl) running(ctx context.Context) error { + for { + // Wait for unlock. This is tied to new dashboardProvisioner to be instantiated before we start polling. + ps.mutex.Lock() + // Using background here because otherwise if root context was canceled the select later on would + // non-deterministically take one of the route possibly going into one polling loop before exiting. + pollingContext, cancelFun := context.WithCancel(context.Background()) + ps.pollingCtxCancel = cancelFun + ps.dashboardProvisioner.PollChanges(pollingContext) + ps.mutex.Unlock() + + select { + case <-pollingContext.Done(): + // Polling was canceled. + continue + case <-ctx.Done(): + // Root server context was cancelled so cancel polling and leave. 
+ ps.cancelPolling() + return nil + } + } +} + func (ps *ProvisioningServiceImpl) setDashboardProvisioner() error { dashboardPath := filepath.Join(ps.Cfg.ProvisioningPath, "dashboards") dashProvisioner, err := ps.newDashboardProvisioner(context.Background(), dashboardPath, ps.dashboardProvisioningService, ps.orgService, ps.dashboardService, ps.folderService, ps.dual) @@ -137,6 +203,8 @@ func newProvisioningServiceImpl( migratePrometheusType: migratePrometheusType, } + s.NamedService = services.NewBasicService(s.starting, s.running, nil).WithName(ServiceName) + if err := s.setDashboardProvisioner(); err != nil { return nil, err } @@ -145,6 +213,7 @@ func newProvisioningServiceImpl( } type ProvisioningServiceImpl struct { + services.NamedService Cfg *setting.Cfg SQLStore db.DB orgService org.Service @@ -173,7 +242,6 @@ type ProvisioningServiceImpl struct { resourcePermissions accesscontrol.ReceiverPermissionsService tracer tracing.Tracer dual dualwrite.Service - onceInitProvisioners sync.Once migratePrometheusType func(context.Context) error } @@ -185,78 +253,11 @@ func (ps *ProvisioningServiceImpl) RunInitProvisioners(ctx context.Context) erro } func (ps *ProvisioningServiceImpl) Run(ctx context.Context) error { - var err error - - // Run Datasources, Plugins and Alerting Provisioning only once. 
- // It can't be initialized at RunInitProvisioners because it - // depends on the /apis endpoints to be already running and listeningq - ps.onceInitProvisioners.Do(func() { - err = ps.ProvisionDatasources(ctx) - if err != nil { - ps.log.Error("Failed to provision data sources", "error", err) - return - } - - err = ps.ProvisionPlugins(ctx) - if err != nil { - ps.log.Error("Failed to provision plugins", "error", err) - return - } - - err = ps.ProvisionAlerting(ctx) - if err != nil { - ps.log.Error("Failed to provision alerting", "error", err) - return - } - - // Migrating prom types relies on data source provisioning to already be completed - // If we can make services depend on other services completing first, - // then we should remove this from provisioning - err = ps.migratePrometheusType(ctx) - if err != nil { - ps.log.Error("Failed to migrate Prometheus type", "error", err) - return - } - }) - - if err != nil { - // error already logged + if err := ps.StartAsync(ctx); err != nil { return err } - - err = ps.ProvisionDashboards(ctx) - if err != nil { - ps.log.Error("Failed to provision dashboard", "error", err) - // Consider the allow list of errors for which running the provisioning service should not - // fail. For now this includes only dashboards.ErrGetOrCreateFolder. - if !errors.Is(err, dashboards.ErrGetOrCreateFolder) { - return err - } - } - if ps.dashboardProvisioner.HasDashboardSources() { - ps.searchService.TriggerReIndex() - } - - for { - // Wait for unlock. This is tied to new dashboardProvisioner to be instantiated before we start polling. - ps.mutex.Lock() - // Using background here because otherwise if root context was canceled the select later on would - // non-deterministically take one of the route possibly going into one polling loop before exiting. 
- pollingContext, cancelFun := context.WithCancel(context.Background()) - ps.pollingCtxCancel = cancelFun - ps.dashboardProvisioner.PollChanges(pollingContext) - ps.mutex.Unlock() - - select { - case <-pollingContext.Done(): - // Polling was canceled. - continue - case <-ctx.Done(): - // Root server context was cancelled so cancel polling and leave. - ps.cancelPolling() - return ctx.Err() - } - } + stopCtx := context.Background() + return ps.AwaitTerminated(stopCtx) } func (ps *ProvisioningServiceImpl) ProvisionDatasources(ctx context.Context) error { diff --git a/pkg/services/provisioning/provisioning_test.go b/pkg/services/provisioning/provisioning_test.go index ee4abc5029b..8b513ba321e 100644 --- a/pkg/services/provisioning/provisioning_test.go +++ b/pkg/services/provisioning/provisioning_test.go @@ -48,7 +48,7 @@ func TestProvisioningServiceImpl(t *testing.T) { serviceTest.waitForStop() assert.False(t, serviceTest.serviceRunning, "Service should not be running") - assert.Equal(t, context.Canceled, serviceTest.serviceError, "Service should have returned canceled error") + assert.NoError(t, serviceTest.serviceError, "Service should not have returned an error") }) t.Run("Failed reloading does not stop polling with old provisioned", func(t *testing.T) { @@ -91,7 +91,7 @@ func TestProvisioningServiceImpl(t *testing.T) { serviceTest.cancel() serviceTest.waitForStop() - assert.Equal(t, context.Canceled, serviceTest.serviceError) + assert.NoError(t, serviceTest.serviceError, "Service should not have returned an error") }) t.Run("Should return run error when dashboard provisioning fails for non-allow-listed error", func(t *testing.T) { diff --git a/pkg/services/sqlstore/sqlstore.go b/pkg/services/sqlstore/sqlstore.go index b54a177cf02..d4842bee884 100644 --- a/pkg/services/sqlstore/sqlstore.go +++ b/pkg/services/sqlstore/sqlstore.go @@ -16,6 +16,7 @@ import ( _ "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" + 
"github.com/grafana/grafana/pkg/util/sqlite" "github.com/grafana/grafana/pkg/util/xorm" "github.com/grafana/grafana/pkg/util/xorm/core" @@ -248,6 +249,9 @@ func (ss *SQLStore) initEngine(engine *xorm.Engine) error { } ss.log.Info("Connecting to DB", "dbtype", ss.dbCfg.Type) + if ss.dbCfg.Type == migrator.SQLite { + ss.log.Info("Using SQLite driver", "driver", sqlite.DriverType()) + } if ss.dbCfg.Type == migrator.SQLite && strings.HasPrefix(ss.dbCfg.ConnectionString, "file:") && !strings.HasPrefix(ss.dbCfg.ConnectionString, "file::memory:") { exists, err := fs.Exists(ss.dbCfg.Path) diff --git a/pkg/services/star/api/api.go b/pkg/services/star/api/api.go index 42110ba2c09..32d6cc08ab6 100644 --- a/pkg/services/star/api/api.go +++ b/pkg/services/star/api/api.go @@ -1,56 +1,53 @@ package api import ( - "context" "net/http" "time" + "github.com/grafana/grafana-app-sdk/logging" "github.com/grafana/grafana/pkg/api/response" "github.com/grafana/grafana/pkg/apimachinery/identity" - "github.com/grafana/grafana/pkg/infra/log" + "github.com/grafana/grafana/pkg/services/apiserver" + "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" - "github.com/grafana/grafana/pkg/services/dashboards" + "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/star" + "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/web" ) type API struct { - starService star.Service - dashboardService dashboards.DashboardService - logger log.Logger + starService star.Service + client K8sClients } func ProvideApi( + cfg *setting.Cfg, // for namespacer + features featuremgmt.FeatureToggles, starService star.Service, - dashboardService dashboards.DashboardService, + configProvider apiserver.DirectRestConfigProvider, ) *API { - starLogger := log.New("stars.api") - api := &API{ - starService: starService, - dashboardService: dashboardService, - logger: 
starLogger, + if features.IsEnabledGlobally(featuremgmt.FlagKubernetesStars) { + starService = nil // don't use it } - return api -} - -func (api *API) getDashboardHelper(ctx context.Context, orgID int64, id int64, uid string) (*dashboards.Dashboard, response.Response) { - var query dashboards.GetDashboardQuery - - if len(uid) > 0 { - query = dashboards.GetDashboardQuery{UID: uid, ID: id, OrgID: orgID} - } else { - query = dashboards.GetDashboardQuery{ID: id, OrgID: orgID} + return &API{ + starService: starService, + client: &k8sClients{ + namespacer: request.GetNamespaceMapper(cfg), + configProvider: configProvider, + }, } - - result, err := api.dashboardService.GetDashboard(ctx, &query) - if err != nil { - return nil, response.Error(http.StatusNotFound, "Dashboard not found", err) - } - - return result, nil } func (api *API) GetStars(c *contextmodel.ReqContext) response.Response { + if api.starService == nil { + stars, err := api.client.GetStars(c) + if err != nil { + logging.FromContext(c.Req.Context()).With("logger", "star.api").Warn("error", "err", err) + } + return response.JSON(http.StatusOK, stars) + } + query := star.GetUserStarsQuery{ UserID: c.UserID, } @@ -86,17 +83,25 @@ func (api *API) StarDashboardByUID(c *contextmodel.ReqContext) response.Response return response.Error(http.StatusBadRequest, "Invalid dashboard UID", nil) } + if api.starService == nil { + err := api.client.AddStar(c, uid) + if err != nil { + return response.Error(http.StatusInternalServerError, "Failed to star dashboard", err) + } + return response.Success("Dashboard starred!") + } + userID, err := identity.UserIdentifier(c.GetID()) if err != nil { return response.Error(http.StatusBadRequest, "Only users and service accounts can star dashboards", nil) } - dash, rsp := api.getDashboardHelper(c.Req.Context(), c.GetOrgID(), 0, uid) + dashboardID, rsp := api.client.GetDashboardID(c, uid) if rsp != nil { return rsp } - cmd := star.StarDashboardCommand{UserID: userID, DashboardID: dash.ID, 
DashboardUID: uid, OrgID: c.GetOrgID(), Updated: time.Now()} + cmd := star.StarDashboardCommand{UserID: userID, DashboardID: dashboardID, DashboardUID: uid, OrgID: c.GetOrgID(), Updated: time.Now()} if err := api.starService.Add(c.Req.Context(), &cmd); err != nil { return response.Error(http.StatusInternalServerError, "Failed to star dashboard", err) @@ -123,6 +128,14 @@ func (api *API) UnstarDashboardByUID(c *contextmodel.ReqContext) response.Respon return response.Error(http.StatusBadRequest, "Invalid dashboard UID", nil) } + if api.starService == nil { + err := api.client.RemoveStar(c, uid) + if err != nil { + return response.Error(http.StatusInternalServerError, "Failed to unstar dashboard", err) + } + return response.Success("Dashboard unstarred") + } + userID, err := identity.UserIdentifier(c.GetID()) if err != nil { return response.Error(http.StatusBadRequest, "Only users and service accounts can star dashboards", nil) diff --git a/pkg/services/star/api/api_test.go b/pkg/services/star/api/api_test.go index 1469ef46c9a..6912b5c8b7e 100644 --- a/pkg/services/star/api/api_test.go +++ b/pkg/services/star/api/api_test.go @@ -8,16 +8,18 @@ import ( "github.com/stretchr/testify/mock" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" - "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/star/startest" "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/web" ) func TestStarDashboardUID(t *testing.T) { - svc := dashboards.NewFakeDashboardService(t) - svc.On("GetDashboard", mock.Anything, mock.Anything).Return(&dashboards.Dashboard{UID: "test", OrgID: 1}, nil) - api := ProvideApi(startest.NewStarServiceFake(), svc) + client := NewMockK8sClients(t) + client.On("GetDashboardID", mock.Anything, mock.Anything).Return(int64(123), nil) + api := &API{ + starService: startest.NewStarServiceFake(), + client: client, + } testCases := []struct { name string diff --git 
a/pkg/services/star/api/client.go b/pkg/services/star/api/client.go new file mode 100644 index 00000000000..2f4cd238cc7 --- /dev/null +++ b/pkg/services/star/api/client.go @@ -0,0 +1,138 @@ +package api + +import ( + "net/http" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + authlib "github.com/grafana/authlib/types" + dashboardsV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1" + preferencesV1 "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1" + "github.com/grafana/grafana/pkg/api/response" + "github.com/grafana/grafana/pkg/apimachinery/identity" + "github.com/grafana/grafana/pkg/apimachinery/utils" + "github.com/grafana/grafana/pkg/services/apiserver" + contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" +) + +//go:generate mockery --name K8sClients --structname MockK8sClients --inpackage --filename client_mock.go --with-expecter +type K8sClients interface { + GetDashboardID(c *contextmodel.ReqContext, uid string) (int64, response.Response) + GetStars(c *contextmodel.ReqContext) ([]string, error) + AddStar(c *contextmodel.ReqContext, uid string) error + RemoveStar(c *contextmodel.ReqContext, uid string) error +} + +type k8sClients struct { + namespacer authlib.NamespaceFormatter + configProvider apiserver.DirectRestConfigProvider +} + +var ( + _ K8sClients = (*k8sClients)(nil) +) + +// GetDashboardID implements the K8sClients interface. 
+func (k *k8sClients) GetDashboardID(c *contextmodel.ReqContext, uid string) (int64, response.Response) { + dyn, err := dynamic.NewForConfig(k.configProvider.GetDirectRestConfig(c)) + if err != nil { + return 0, response.Error(http.StatusInternalServerError, "client config", err) + } + client := dyn.Resource(dashboardsV1.GroupVersion.WithResource(dashboardsV1.DASHBOARD_RESOURCE)).Namespace(k.namespacer(c.OrgID)) + obj, err := client.Get(c.Req.Context(), uid, v1.GetOptions{}) + if err != nil { + return 0, response.Error(http.StatusNotFound, "Dashboard not found", err) + } + dash, err := utils.MetaAccessor(obj) + if err != nil { + return 0, response.Error(http.StatusInternalServerError, "invalid object", err) + } + return dash.GetDeprecatedInternalID(), nil // nolint:staticcheck +} + +// GetStars implements K8sClients. +func (k *k8sClients) GetStars(c *contextmodel.ReqContext) ([]string, error) { + dyn, err := dynamic.NewForConfig(k.configProvider.GetDirectRestConfig(c)) + if err != nil { + return nil, err + } + client := dyn.Resource(preferencesV1.StarsResourceInfo.GroupVersionResource()).Namespace(k.namespacer(c.OrgID)) + + ctx := c.Req.Context() + user, err := identity.GetRequester(ctx) + if err != nil { + return nil, err + } + + obj, _ := client.Get(ctx, "user-"+user.GetIdentifier(), v1.GetOptions{}) + if obj != nil { + resources, ok, _ := unstructured.NestedSlice(obj.Object, "spec", "resource") + if ok && resources != nil { + for _, r := range resources { + tmp, ok := r.(map[string]any) + if ok { + g, _, _ := unstructured.NestedString(tmp, "group") + k, _, _ := unstructured.NestedString(tmp, "kind") + if k == "Dashboard" && g == dashboardsV1.APIGroup { + names, _, _ := unstructured.NestedStringSlice(tmp, "names") + return names, nil + } + } + } + } + } + return []string{}, nil +} + +// AddStar implements K8sClients. 
+func (k *k8sClients) AddStar(c *contextmodel.ReqContext, uid string) error { + dyn, err := kubernetes.NewForConfig(k.configProvider.GetDirectRestConfig(c)) + if err != nil { + return err + } + + ctx := c.Req.Context() + user, err := identity.GetRequester(ctx) + if err != nil { + return err + } + + ns := k.namespacer(c.OrgID) + + client := dyn.RESTClient() + rsp := client.Put().AbsPath( + "apis", preferencesV1.APIGroup, preferencesV1.APIVersion, "namespaces", ns, + "stars", "user-"+user.GetIdentifier(), + "update", dashboardsV1.APIGroup, dashboardsV1.DashboardKind().Kind(), uid, + ).Do(ctx) + + return rsp.Error() +} + +// RemoveStar implements K8sClients. +func (k *k8sClients) RemoveStar(c *contextmodel.ReqContext, uid string) error { + dyn, err := kubernetes.NewForConfig(k.configProvider.GetDirectRestConfig(c)) + if err != nil { + return err + } + + ctx := c.Req.Context() + user, err := identity.GetRequester(ctx) + if err != nil { + return err + } + + ns := k.namespacer(c.OrgID) + + client := dyn.RESTClient() + rsp := client.Delete().AbsPath( + "apis", preferencesV1.APIGroup, preferencesV1.APIVersion, "namespaces", ns, + "stars", "user-"+user.GetIdentifier(), + "update", dashboardsV1.APIGroup, dashboardsV1.DashboardKind().Kind(), uid, + ).Do(ctx) + + return rsp.Error() +} diff --git a/pkg/services/star/api/client_mock.go b/pkg/services/star/api/client_mock.go new file mode 100644 index 00000000000..ecebf50dbfb --- /dev/null +++ b/pkg/services/star/api/client_mock.go @@ -0,0 +1,248 @@ +// Code generated by mockery v2.53.4. DO NOT EDIT. 
+ +package api + +import ( + contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" + mock "github.com/stretchr/testify/mock" + + response "github.com/grafana/grafana/pkg/api/response" +) + +// MockK8sClients is an autogenerated mock type for the K8sClients type +type MockK8sClients struct { + mock.Mock +} + +type MockK8sClients_Expecter struct { + mock *mock.Mock +} + +func (_m *MockK8sClients) EXPECT() *MockK8sClients_Expecter { + return &MockK8sClients_Expecter{mock: &_m.Mock} +} + +// AddStar provides a mock function with given fields: c, uid +func (_m *MockK8sClients) AddStar(c *contextmodel.ReqContext, uid string) error { + ret := _m.Called(c, uid) + + if len(ret) == 0 { + panic("no return value specified for AddStar") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*contextmodel.ReqContext, string) error); ok { + r0 = rf(c, uid) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockK8sClients_AddStar_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddStar' +type MockK8sClients_AddStar_Call struct { + *mock.Call +} + +// AddStar is a helper method to define mock.On call +// - c *contextmodel.ReqContext +// - uid string +func (_e *MockK8sClients_Expecter) AddStar(c interface{}, uid interface{}) *MockK8sClients_AddStar_Call { + return &MockK8sClients_AddStar_Call{Call: _e.mock.On("AddStar", c, uid)} +} + +func (_c *MockK8sClients_AddStar_Call) Run(run func(c *contextmodel.ReqContext, uid string)) *MockK8sClients_AddStar_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*contextmodel.ReqContext), args[1].(string)) + }) + return _c +} + +func (_c *MockK8sClients_AddStar_Call) Return(_a0 error) *MockK8sClients_AddStar_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockK8sClients_AddStar_Call) RunAndReturn(run func(*contextmodel.ReqContext, string) error) *MockK8sClients_AddStar_Call { + _c.Call.Return(run) + return _c +} + +// GetDashboardID provides a mock 
function with given fields: c, uid +func (_m *MockK8sClients) GetDashboardID(c *contextmodel.ReqContext, uid string) (int64, response.Response) { + ret := _m.Called(c, uid) + + if len(ret) == 0 { + panic("no return value specified for GetDashboardID") + } + + var r0 int64 + var r1 response.Response + if rf, ok := ret.Get(0).(func(*contextmodel.ReqContext, string) (int64, response.Response)); ok { + return rf(c, uid) + } + if rf, ok := ret.Get(0).(func(*contextmodel.ReqContext, string) int64); ok { + r0 = rf(c, uid) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(*contextmodel.ReqContext, string) response.Response); ok { + r1 = rf(c, uid) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(response.Response) + } + } + + return r0, r1 +} + +// MockK8sClients_GetDashboardID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDashboardID' +type MockK8sClients_GetDashboardID_Call struct { + *mock.Call +} + +// GetDashboardID is a helper method to define mock.On call +// - c *contextmodel.ReqContext +// - uid string +func (_e *MockK8sClients_Expecter) GetDashboardID(c interface{}, uid interface{}) *MockK8sClients_GetDashboardID_Call { + return &MockK8sClients_GetDashboardID_Call{Call: _e.mock.On("GetDashboardID", c, uid)} +} + +func (_c *MockK8sClients_GetDashboardID_Call) Run(run func(c *contextmodel.ReqContext, uid string)) *MockK8sClients_GetDashboardID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*contextmodel.ReqContext), args[1].(string)) + }) + return _c +} + +func (_c *MockK8sClients_GetDashboardID_Call) Return(_a0 int64, _a1 response.Response) *MockK8sClients_GetDashboardID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockK8sClients_GetDashboardID_Call) RunAndReturn(run func(*contextmodel.ReqContext, string) (int64, response.Response)) *MockK8sClients_GetDashboardID_Call { + _c.Call.Return(run) + return _c +} + +// GetStars provides a mock function with 
given fields: c +func (_m *MockK8sClients) GetStars(c *contextmodel.ReqContext) ([]string, error) { + ret := _m.Called(c) + + if len(ret) == 0 { + panic("no return value specified for GetStars") + } + + var r0 []string + var r1 error + if rf, ok := ret.Get(0).(func(*contextmodel.ReqContext) ([]string, error)); ok { + return rf(c) + } + if rf, ok := ret.Get(0).(func(*contextmodel.ReqContext) []string); ok { + r0 = rf(c) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + if rf, ok := ret.Get(1).(func(*contextmodel.ReqContext) error); ok { + r1 = rf(c) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockK8sClients_GetStars_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStars' +type MockK8sClients_GetStars_Call struct { + *mock.Call +} + +// GetStars is a helper method to define mock.On call +// - c *contextmodel.ReqContext +func (_e *MockK8sClients_Expecter) GetStars(c interface{}) *MockK8sClients_GetStars_Call { + return &MockK8sClients_GetStars_Call{Call: _e.mock.On("GetStars", c)} +} + +func (_c *MockK8sClients_GetStars_Call) Run(run func(c *contextmodel.ReqContext)) *MockK8sClients_GetStars_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*contextmodel.ReqContext)) + }) + return _c +} + +func (_c *MockK8sClients_GetStars_Call) Return(_a0 []string, _a1 error) *MockK8sClients_GetStars_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockK8sClients_GetStars_Call) RunAndReturn(run func(*contextmodel.ReqContext) ([]string, error)) *MockK8sClients_GetStars_Call { + _c.Call.Return(run) + return _c +} + +// RemoveStar provides a mock function with given fields: c, uid +func (_m *MockK8sClients) RemoveStar(c *contextmodel.ReqContext, uid string) error { + ret := _m.Called(c, uid) + + if len(ret) == 0 { + panic("no return value specified for RemoveStar") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*contextmodel.ReqContext, string) error); ok { + r0 = 
rf(c, uid) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockK8sClients_RemoveStar_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveStar' +type MockK8sClients_RemoveStar_Call struct { + *mock.Call +} + +// RemoveStar is a helper method to define mock.On call +// - c *contextmodel.ReqContext +// - uid string +func (_e *MockK8sClients_Expecter) RemoveStar(c interface{}, uid interface{}) *MockK8sClients_RemoveStar_Call { + return &MockK8sClients_RemoveStar_Call{Call: _e.mock.On("RemoveStar", c, uid)} +} + +func (_c *MockK8sClients_RemoveStar_Call) Run(run func(c *contextmodel.ReqContext, uid string)) *MockK8sClients_RemoveStar_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*contextmodel.ReqContext), args[1].(string)) + }) + return _c +} + +func (_c *MockK8sClients_RemoveStar_Call) Return(_a0 error) *MockK8sClients_RemoveStar_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockK8sClients_RemoveStar_Call) RunAndReturn(run func(*contextmodel.ReqContext, string) error) *MockK8sClients_RemoveStar_Call { + _c.Call.Return(run) + return _c +} + +// NewMockK8sClients creates a new instance of MockK8sClients. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockK8sClients(t interface { + mock.TestingT + Cleanup(func()) +}) *MockK8sClients { + mock := &MockK8sClients{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/services/store/kind/dashboard/dashboard.go b/pkg/services/store/kind/dashboard/dashboard.go index 3229acbe80d..d6000689c1a 100644 --- a/pkg/services/store/kind/dashboard/dashboard.go +++ b/pkg/services/store/kind/dashboard/dashboard.go @@ -334,6 +334,9 @@ func filterOutSpecialDatasources(dash *DashboardSummaryInfo) { case "-- Dashboard --": // The `Dashboard` datasource refers to the results of the query used in another panel continue + case "grafana": + // this is the uid for the -- Grafana -- datasource + continue default: dsRefs = append(dsRefs, ds) } diff --git a/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json b/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json index 136e20a8571..a6502144412 100644 --- a/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json +++ b/pkg/services/store/kind/dashboard/testdata/check-string-datasource-id-info.json @@ -2,24 +2,12 @@ "id": 250, "title": "fast streaming", "tags": null, - "datasource": [ - { - "uid": "grafana", - "type": "datasource" - } - ], "panels": [ { "id": 3, "title": "Panel Title", "type": "timeseries", - "pluginVersion": "7.5.0-pre", - "datasource": [ - { - "uid": "grafana", - "type": "datasource" - } - ] + "pluginVersion": "7.5.0-pre" } ], "schemaVersion": 27, diff --git a/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json b/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json index 1f2fabff36f..37a7b893804 100644 --- a/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json +++ b/pkg/services/store/kind/dashboard/testdata/special-datasource-types-info.json @@ -3,10 +3,6 @@ "title": "special ds", "tags": null, "datasource": [ 
- { - "uid": "grafana", - "type": "datasource" - }, { "uid": "dgd92lq7k", "type": "frser-sqlite-datasource" @@ -22,10 +18,6 @@ "title": "mixed ds with grafana ds", "type": "timeseries", "datasource": [ - { - "uid": "grafana", - "type": "datasource" - }, { "uid": "dgd92lq7k", "type": "frser-sqlite-datasource" @@ -45,13 +37,7 @@ { "id": 6, "title": "grafana ds", - "type": "timeseries", - "datasource": [ - { - "uid": "grafana", - "type": "datasource" - } - ] + "type": "timeseries" }, { "id": 2, diff --git a/pkg/services/team/team.go b/pkg/services/team/team.go index 49db0c53734..8d55fb908a8 100644 --- a/pkg/services/team/team.go +++ b/pkg/services/team/team.go @@ -19,7 +19,7 @@ type Service interface { GetTeamIDsByUser(ctx context.Context, query *GetTeamIDsByUserQuery) ([]int64, error) IsTeamMember(ctx context.Context, orgId int64, teamId int64, userId int64) (bool, error) RemoveUsersMemberships(tx context.Context, userID int64) error - GetUserTeamMemberships(ctx context.Context, orgID, userID int64, external bool) ([]*TeamMemberDTO, error) + GetUserTeamMemberships(ctx context.Context, orgID, userID int64, external bool, bypassCache bool) ([]*TeamMemberDTO, error) GetTeamMembers(ctx context.Context, query *GetTeamMembersQuery) ([]*TeamMemberDTO, error) RegisterDelete(query string) } diff --git a/pkg/services/team/teamapi/team.go b/pkg/services/team/teamapi/team.go index 9df49d0d66a..69436edd8cc 100644 --- a/pkg/services/team/teamapi/team.go +++ b/pkg/services/team/teamapi/team.go @@ -431,7 +431,8 @@ func (tapi *TeamAPI) validateTeam(c *contextmodel.ReqContext, teamID int64, prov return response.Error(http.StatusInternalServerError, "Failed to get Team", err) } - if teamDTO.IsProvisioned { + isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false) + if isGroupSyncEnabled && teamDTO.IsProvisioned { return response.Error(http.StatusBadRequest, provisionedMessage, err) } diff --git a/pkg/services/team/teamapi/team_members.go 
b/pkg/services/team/teamapi/team_members.go index 1303f97e028..e9f4d0ac91d 100644 --- a/pkg/services/team/teamapi/team_members.go +++ b/pkg/services/team/teamapi/team_members.go @@ -299,7 +299,8 @@ func (tapi *TeamAPI) removeTeamMember(c *contextmodel.ReqContext) response.Respo return response.Error(http.StatusInternalServerError, "Failed to get Team", err) } - if existingTeam.IsProvisioned { + isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false) + if isGroupSyncEnabled && existingTeam.IsProvisioned { return response.Error(http.StatusBadRequest, "Team memberships cannot be updated for provisioned teams", err) } diff --git a/pkg/services/team/teamapi/team_members_test.go b/pkg/services/team/teamapi/team_members_test.go index f14c01d1c73..65f8f2693ee 100644 --- a/pkg/services/team/teamapi/team_members_test.go +++ b/pkg/services/team/teamapi/team_members_test.go @@ -169,13 +169,15 @@ func TestUpdateTeamMembersAPIEndpoint(t *testing.T) { }) } -func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) { +func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsEnabled(t *testing.T) { server := SetupAPITestServer(t, &teamtest.FakeService{ ExpectedIsMember: true, ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true}, + }, func(tapi *TeamAPI) { + tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true") }) - t.Run("should not be able to update team member from a provisioned team", func(t *testing.T) { + t.Run("should not be able to update team member from a provisioned team if team sync is enabled", func(t *testing.T) { req := webtest.RequestWithSignedInUser( server.NewRequest(http.MethodPut, "/api/teams/1/members/1", strings.NewReader("{\"permission\": 1}")), authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}), @@ -186,7 +188,7 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) { 
require.NoError(t, res.Body.Close()) }) - t.Run("should not be able to update team member from a provisioned team by team UID", func(t *testing.T) { + t.Run("should not be able to update team member from a provisioned team by team UID if team sync is enabled", func(t *testing.T) { req := webtest.RequestWithSignedInUser( server.NewRequest(http.MethodPut, "/api/teams/a00001/members/1", strings.NewReader("{\"permission\": 1}")), authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}), @@ -198,6 +200,27 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) { }) } +func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsDisabled(t *testing.T) { + t.Run("should be able to delete team member from a provisioned team when SCIM group sync is disabled", func(t *testing.T) { + server := SetupAPITestServer(t, nil, func(hs *TeamAPI) { + hs.teamService = &teamtest.FakeService{ + ExpectedIsMember: true, + ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true}, + } + hs.teamPermissionsService = &actest.FakePermissionsService{} + }) + + req := webtest.RequestWithSignedInUser( + server.NewRequest(http.MethodDelete, "/api/teams/1/members/1", nil), + authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}), + ) + res, err := server.SendJSON(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + require.NoError(t, res.Body.Close()) + }) +} + func TestDeleteTeamMembersAPIEndpoint(t *testing.T) { server := SetupAPITestServer(t, nil, func(hs *TeamAPI) { hs.teamService = &teamtest.FakeService{ @@ -236,6 +259,8 @@ func TestDeleteTeamMembersFromProvisionedTeam(t *testing.T) { ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true}, } hs.teamPermissionsService = &actest.FakePermissionsService{} + }, func(hs *TeamAPI) { + 
hs.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true") }) t.Run("should not be able to delete team member from a provisioned team", func(t *testing.T) { diff --git a/pkg/services/team/teamimpl/team.go b/pkg/services/team/teamimpl/team.go index 245ee98ddae..94fcf6d2186 100644 --- a/pkg/services/team/teamimpl/team.go +++ b/pkg/services/team/teamimpl/team.go @@ -114,18 +114,20 @@ func (s *Service) RemoveUsersMemberships(ctx context.Context, userID int64) erro return s.store.RemoveUsersMemberships(ctx, userID) } -func (s *Service) GetUserTeamMemberships(ctx context.Context, orgID, userID int64, external bool) ([]*team.TeamMemberDTO, error) { +func (s *Service) GetUserTeamMemberships(ctx context.Context, orgID, userID int64, external bool, bypassCache bool) ([]*team.TeamMemberDTO, error) { ctx, span := s.tracer.Start(ctx, "team.GetUserTeamMemberships", trace.WithAttributes( attribute.Int64("orgID", orgID), attribute.Int64("userID", userID), )) defer span.End() cacheKey := fmt.Sprintf("teams:%d:%d:%t", orgID, userID, external) - if cached, found := s.cache.Get(cacheKey); found { - if teams, ok := cached.([]*team.TeamMemberDTO); ok { - return teams, nil + if !bypassCache { + if cached, found := s.cache.Get(cacheKey); found { + if teams, ok := cached.([]*team.TeamMemberDTO); ok { + return teams, nil + } + s.cache.Delete(cacheKey) } - s.cache.Delete(cacheKey) } teams, err := s.store.GetMemberships(ctx, orgID, userID, external) if err != nil { @@ -137,7 +139,9 @@ func (s *Service) GetUserTeamMemberships(ctx context.Context, orgID, userID int6 return []*team.TeamMemberDTO{}, nil } - s.cache.Set(cacheKey, teams, defaultCacheDuration) + if !bypassCache { + s.cache.Set(cacheKey, teams, defaultCacheDuration) + } return teams, nil } diff --git a/pkg/services/team/teamtest/team.go b/pkg/services/team/teamtest/team.go index 3caefb9562e..3b88d05d5d3 100644 --- a/pkg/services/team/teamtest/team.go +++ b/pkg/services/team/teamtest/team.go @@ -58,7 +58,7 @@ func (s 
*FakeService) RemoveUsersMemberships(ctx context.Context, userID int64) return s.ExpectedError } -func (s *FakeService) GetUserTeamMemberships(ctx context.Context, orgID, userID int64, external bool) ([]*team.TeamMemberDTO, error) { +func (s *FakeService) GetUserTeamMemberships(ctx context.Context, orgID, userID int64, external bool, bypassCache bool) ([]*team.TeamMemberDTO, error) { return s.ExpectedMembers, s.ExpectedError } diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index 50cecb688bb..89a42e9d56d 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -138,6 +138,7 @@ type Cfg struct { ProvisioningDisableControllers bool ProvisioningAllowedTargets []string ProvisioningAllowImageRendering bool + ProvisioningMinSyncInterval time.Duration ProvisioningRepositoryTypes []string ProvisioningLokiURL string ProvisioningLokiUser string @@ -543,9 +544,6 @@ type Cfg struct { // Cloud Migration CloudMigration CloudMigrationSettings - // Feature Management Settings - FeatureManagement FeatureMgmtSettings - // Alerting AlertingEvaluationTimeout time.Duration AlertingNotificationTimeout time.Duration @@ -578,13 +576,15 @@ type Cfg struct { MaxPageSizeBytes int IndexPath string IndexWorkers int + IndexRebuildWorkers int IndexMaxBatchSize int IndexFileThreshold int IndexMinCount int IndexRebuildInterval time.Duration IndexCacheTTL time.Duration - MaxFileIndexAge time.Duration // Max age of file-based indexes. Index older than this will not be reused between restarts. - MinFileIndexBuildVersion string // Minimum version of Grafana that built the file-based index. If index was built with older Grafana, it will not be reused between restarts. + IndexMinUpdateInterval time.Duration // Don't update index if it was updated less than this interval ago. + MaxFileIndexAge time.Duration // Max age of file-based indexes. Index older than this will be rebuilt asynchronously. 
+ MinFileIndexBuildVersion string // Minimum version of Grafana that built the file-based index. If index was built with older Grafana, it will be rebuilt asynchronously. EnableSharding bool QOSEnabled bool QOSNumberWorker int @@ -1429,7 +1429,6 @@ func (cfg *Cfg) parseINIFile(iniFile *ini.File) error { logSection := iniFile.Section("log") cfg.UserFacingDefaultError = logSection.Key("user_facing_default_error").MustString("please inspect Grafana server log for details") - cfg.readFeatureManagementConfig() cfg.readPublicDashboardsSettings() cfg.readCloudMigrationSettings() cfg.readSecretsManagerSettings() @@ -2128,6 +2127,7 @@ func (cfg *Cfg) readProvisioningSettings(iniFile *ini.File) error { cfg.ProvisioningAllowedTargets = []string{"instance", "folder"} } cfg.ProvisioningAllowImageRendering = iniFile.Section("provisioning").Key("allow_image_rendering").MustBool(true) + cfg.ProvisioningMinSyncInterval = iniFile.Section("provisioning").Key("min_sync_interval").MustDuration(10 * time.Second) // Read job history configuration cfg.ProvisioningLokiURL = valueAsString(iniFile.Section("provisioning"), "loki_url", "") diff --git a/pkg/setting/setting_featuremgmt.go b/pkg/setting/setting_featuremgmt.go deleted file mode 100644 index 1887e9e0856..00000000000 --- a/pkg/setting/setting_featuremgmt.go +++ /dev/null @@ -1,38 +0,0 @@ -package setting - -import ( - "github.com/grafana/grafana/pkg/util" -) - -type FeatureMgmtSettings struct { - HiddenToggles map[string]struct{} - ReadOnlyToggles map[string]struct{} - AllowEditing bool - UpdateWebhook string - UpdateWebhookToken string -} - -func (cfg *Cfg) readFeatureManagementConfig() { - section := cfg.Raw.Section("feature_management") - - hiddenToggles := make(map[string]struct{}) - readOnlyToggles := make(map[string]struct{}) - - // parse the comma separated list in `hidden_toggles`. 
- hiddenTogglesStr := valueAsString(section, "hidden_toggles", "") - for _, feature := range util.SplitString(hiddenTogglesStr) { - hiddenToggles[feature] = struct{}{} - } - - // parse the comma separated list in `read_only_toggles`. - readOnlyTogglesStr := valueAsString(section, "read_only_toggles", "") - for _, feature := range util.SplitString(readOnlyTogglesStr) { - readOnlyToggles[feature] = struct{}{} - } - - cfg.FeatureManagement.HiddenToggles = hiddenToggles - cfg.FeatureManagement.ReadOnlyToggles = readOnlyToggles - cfg.FeatureManagement.AllowEditing = cfg.SectionWithEnvOverrides("feature_management").Key("allow_editing").MustBool(false) - cfg.FeatureManagement.UpdateWebhook = cfg.SectionWithEnvOverrides("feature_management").Key("update_webhook").MustString("") - cfg.FeatureManagement.UpdateWebhookToken = cfg.SectionWithEnvOverrides("feature_management").Key("update_webhook_token").MustString("") -} diff --git a/pkg/setting/setting_unified_storage.go b/pkg/setting/setting_unified_storage.go index 921fb2f24a5..914d5f789d7 100644 --- a/pkg/setting/setting_unified_storage.go +++ b/pkg/setting/setting_unified_storage.go @@ -54,6 +54,7 @@ func (cfg *Cfg) setUnifiedStorageConfig() { cfg.MaxPageSizeBytes = section.Key("max_page_size_bytes").MustInt(0) cfg.IndexPath = section.Key("index_path").String() cfg.IndexWorkers = section.Key("index_workers").MustInt(10) + cfg.IndexRebuildWorkers = section.Key("index_rebuild_workers").MustInt(5) cfg.IndexMaxBatchSize = section.Key("index_max_batch_size").MustInt(100) cfg.EnableSharding = section.Key("enable_sharding").MustBool(false) cfg.QOSEnabled = section.Key("qos_enabled").MustBool(false) @@ -72,6 +73,7 @@ func (cfg *Cfg) setUnifiedStorageConfig() { // default to 24 hours because usage insights summarizes the data every 24 hours cfg.IndexRebuildInterval = section.Key("index_rebuild_interval").MustDuration(24 * time.Hour) cfg.IndexCacheTTL = section.Key("index_cache_ttl").MustDuration(10 * time.Minute) + 
cfg.IndexMinUpdateInterval = section.Key("index_min_update_interval").MustDuration(0) cfg.SprinklesApiServer = section.Key("sprinkles_api_server").String() cfg.SprinklesApiServerPageLimit = section.Key("sprinkles_api_server_page_limit").MustInt(10000) cfg.CACertPath = section.Key("ca_cert_path").String() diff --git a/pkg/storage/secret/metadata/decrypt_store.go b/pkg/storage/secret/metadata/decrypt_store.go index b7a06c99e0e..d148e365925 100644 --- a/pkg/storage/secret/metadata/decrypt_store.go +++ b/pkg/storage/secret/metadata/decrypt_store.go @@ -1,7 +1,9 @@ package metadata import ( + "cmp" "context" + "errors" "fmt" "time" @@ -83,22 +85,30 @@ func (s *decryptStorage) Decrypt(ctx context.Context, namespace xkube.Namespace, } } + decryptResultLabel := metrics.DecryptResultLabel(decryptErr) + if decryptErr == nil { + span.SetStatus(codes.Ok, "Decrypt succeeded") args = append(args, "operation", "decrypt_secret_success") } else { span.SetStatus(codes.Error, "Decrypt failed") span.RecordError(decryptErr) - args = append(args, "operation", "decrypt_secret_error", "error", decryptErr.Error(), "result", metrics.DecryptResultLabel(decryptErr)) + args = append(args, "operation", "decrypt_secret_error", "error", decryptErr.Error(), "result", decryptResultLabel) } logging.FromContext(ctx).Info("Secrets Audit Log", args...) - s.metrics.DecryptDuration.WithLabelValues(metrics.DecryptResultLabel(decryptErr)).Observe(time.Since(start).Seconds()) + s.metrics.DecryptDuration.WithLabelValues(decryptResultLabel).Observe(time.Since(start).Seconds()) + + // Do not leak error details to caller, return only the wrapped domain errors. + if decryptErr != nil { + decryptErr = cmp.Or(errors.Unwrap(decryptErr), contracts.ErrDecryptFailed) + } }() // Basic authn check before reading a secure value metadata, it is here on purpose. 
if _, ok := claims.AuthInfoFrom(ctx); !ok { - return "", contracts.ErrDecryptNotAuthorized + return "", fmt.Errorf("no auth info in context (%w)", contracts.ErrDecryptNotAuthorized) } // The auth token will not necessarily have the permission to read the secure value metadata, @@ -106,27 +116,27 @@ func (s *decryptStorage) Decrypt(ctx context.Context, namespace xkube.Namespace, // function call happens after this. sv, err := s.secureValueMetadataStorage.Read(ctx, namespace, name, contracts.ReadOpts{}) if err != nil { - return "", contracts.ErrDecryptNotFound + return "", fmt.Errorf("failed to read secure value metadata storage: %v (%w)", err, contracts.ErrDecryptNotFound) } - decrypterIdentity, authorized := s.decryptAuthorizer.Authorize(ctx, namespace, name, sv.Spec.Decrypters, sv.OwnerReferences) + decrypterIdentity, authorized, reason := s.decryptAuthorizer.Authorize(ctx, namespace, name, sv.Spec.Decrypters, sv.OwnerReferences) if !authorized { - return "", contracts.ErrDecryptNotAuthorized + return "", fmt.Errorf("failed to authorize decryption with reason %v (%w)", reason, contracts.ErrDecryptNotAuthorized) } keeperConfig, err := s.keeperMetadataStorage.GetKeeperConfig(ctx, namespace.String(), sv.Spec.Keeper, contracts.ReadOpts{}) if err != nil { - return "", contracts.ErrDecryptFailed + return "", fmt.Errorf("failed to read keeper config metadata storage: %v (%w)", err, contracts.ErrDecryptFailed) } keeper, err := s.keeperService.KeeperForConfig(keeperConfig) if err != nil { - return "", contracts.ErrDecryptFailed + return "", fmt.Errorf("failed to get keeper for config: %v (%w)", err, contracts.ErrDecryptFailed) } exposedValue, err := keeper.Expose(ctx, keeperConfig, namespace.String(), name, sv.Status.Version) if err != nil { - return "", contracts.ErrDecryptFailed + return "", fmt.Errorf("failed to expose secret: %v (%w)", err, contracts.ErrDecryptFailed) } return exposedValue, nil diff --git a/pkg/storage/secret/metadata/decrypt_store_test.go 
b/pkg/storage/secret/metadata/decrypt_store_test.go index 03edc12e67f..5b29fd1fd62 100644 --- a/pkg/storage/secret/metadata/decrypt_store_test.go +++ b/pkg/storage/secret/metadata/decrypt_store_test.go @@ -32,7 +32,7 @@ func TestIntegrationDecrypt(t *testing.T) { sut := testutils.Setup(t) exposed, err := sut.DecryptStorage.Decrypt(ctx, "default", "name") - require.Error(t, err) + require.Equal(t, err.Error(), contracts.ErrDecryptNotAuthorized.Error()) // make sure we are stripping the error details require.Empty(t, exposed) }) @@ -48,7 +48,7 @@ func TestIntegrationDecrypt(t *testing.T) { sut := testutils.Setup(t) exposed, err := sut.DecryptStorage.Decrypt(authCtx, "default", "non-existent-value") - require.ErrorIs(t, err, contracts.ErrDecryptNotFound) + require.Equal(t, err.Error(), contracts.ErrDecryptNotFound.Error()) // make sure we are stripping the error details require.Empty(t, exposed) }) @@ -114,7 +114,7 @@ func TestIntegrationDecrypt(t *testing.T) { require.NoError(t, err) exposed, err := sut.DecryptStorage.Decrypt(authCtx, "default", svName) - require.ErrorIs(t, err, contracts.ErrDecryptNotAuthorized) + require.Equal(t, err.Error(), contracts.ErrDecryptNotAuthorized.Error()) // make sure we are stripping the error details require.Empty(t, exposed) }) @@ -146,7 +146,7 @@ func TestIntegrationDecrypt(t *testing.T) { require.NoError(t, err) exposed, err := sut.DecryptStorage.Decrypt(authCtx, "default", "sv-test") - require.ErrorIs(t, err, contracts.ErrDecryptNotAuthorized) + require.Equal(t, err.Error(), contracts.ErrDecryptNotAuthorized.Error()) // make sure we are stripping the error details require.Empty(t, exposed) }) @@ -179,7 +179,7 @@ func TestIntegrationDecrypt(t *testing.T) { require.NoError(t, err) exposed, err := sut.DecryptStorage.Decrypt(authCtx, "default", svName) - require.ErrorIs(t, err, contracts.ErrDecryptNotAuthorized) + require.Equal(t, err.Error(), contracts.ErrDecryptNotAuthorized.Error()) // make sure we are stripping the error details
require.Empty(t, exposed) }) @@ -211,7 +211,7 @@ func TestIntegrationDecrypt(t *testing.T) { require.NoError(t, err) exposed, err := sut.DecryptStorage.Decrypt(authCtx, "default", "sv-test") - require.ErrorIs(t, err, contracts.ErrDecryptNotAuthorized) + require.Equal(t, err.Error(), contracts.ErrDecryptNotAuthorized.Error()) // make sure we are stripping the error details require.Empty(t, exposed) }) @@ -244,8 +244,7 @@ func TestIntegrationDecrypt(t *testing.T) { require.NoError(t, err) exposed, err := sut.DecryptStorage.Decrypt(authCtx, "default", svName) - require.Error(t, err) - require.Equal(t, err.Error(), "not authorized") + require.Equal(t, err.Error(), contracts.ErrDecryptNotAuthorized.Error()) // make sure we are stripping the error details require.Empty(t, exposed) }) diff --git a/pkg/storage/unified/apistore/permissions.go b/pkg/storage/unified/apistore/permissions.go index 95d2099cb86..395ac65f4f2 100644 --- a/pkg/storage/unified/apistore/permissions.go +++ b/pkg/storage/unified/apistore/permissions.go @@ -39,11 +39,6 @@ func afterCreatePermissionCreator(ctx context.Context, return nil, errors.New("missing auth info") } - idtype := auth.GetIdentityType() - if idtype != authtypes.TypeUser && idtype != authtypes.TypeServiceAccount && idtype != authtypes.TypeAccessPolicy { - return nil, fmt.Errorf("only users, service accounts, and access policies may grant permissions using an annotation") - } - return func(ctx context.Context) error { return setter(ctx, key, auth, val) }, nil diff --git a/pkg/storage/unified/apistore/permissions_test.go b/pkg/storage/unified/apistore/permissions_test.go index ff42a4cd189..fab28e6dc37 100644 --- a/pkg/storage/unified/apistore/permissions_test.go +++ b/pkg/storage/unified/apistore/permissions_test.go @@ -9,7 +9,6 @@ import ( authtypes "github.com/grafana/authlib/types" "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1" - "github.com/grafana/grafana/pkg/apimachinery/identity" 
"github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/storage/unified/resourcepb" ) @@ -46,85 +45,4 @@ func TestAfterCreatePermissionCreator(t *testing.T) { require.Nil(t, creator) require.Contains(t, err.Error(), "missing auth info") }) - - t.Run("should succeed for user identity", func(t *testing.T) { - ctx := identity.WithRequester(context.Background(), &identity.StaticRequester{ - Type: authtypes.TypeUser, - OrgID: 1, - OrgRole: "Admin", - UserID: 1, - }) - obj := &v0alpha1.Dashboard{} - key := &resourcepb.ResourceKey{ - Group: "test", - Resource: "test", - Namespace: "test", - Name: "test", - } - - creator, err := afterCreatePermissionCreator(ctx, key, utils.AnnoGrantPermissionsDefault, obj, mockSetter) - require.NoError(t, err) - require.NotNil(t, creator) - - err = creator(ctx) - require.NoError(t, err) - }) - - t.Run("should succeed for service account identity", func(t *testing.T) { - ctx := identity.WithRequester(context.Background(), &identity.StaticRequester{ - Type: authtypes.TypeServiceAccount, - OrgID: 1, - OrgRole: "Admin", - UserID: 1, - }) - obj := &v0alpha1.Dashboard{} - key := &resourcepb.ResourceKey{ - Group: "test", - Resource: "test", - Namespace: "test", - Name: "test", - } - - creator, err := afterCreatePermissionCreator(ctx, key, utils.AnnoGrantPermissionsDefault, obj, mockSetter) - require.NoError(t, err) - require.NotNil(t, creator) - - err = creator(ctx) - require.NoError(t, err) - }) - - t.Run("should succeed for access policy identity", func(t *testing.T) { - ctx := identity.WithRequester(context.Background(), &identity.StaticRequester{ - Type: authtypes.TypeAccessPolicy, - OrgID: 1, - OrgRole: "Admin", - UserID: 1, - }) - obj := &v0alpha1.Dashboard{} - key := &resourcepb.ResourceKey{ - Group: "test", - Resource: "test", - Namespace: "test", - Name: "test", - } - - creator, err := afterCreatePermissionCreator(ctx, key, utils.AnnoGrantPermissionsDefault, obj, mockSetter) - require.NoError(t, err) - 
require.NotNil(t, creator) - - err = creator(ctx) - require.NoError(t, err) - }) - - t.Run("should error for non-user/non-service-account identity", func(t *testing.T) { - ctx := identity.WithRequester(context.Background(), &identity.StaticRequester{ - Type: authtypes.TypeAnonymous, - }) - obj := &v0alpha1.Dashboard{} - - creator, err := afterCreatePermissionCreator(ctx, nil, utils.AnnoGrantPermissionsDefault, obj, mockSetter) - require.Error(t, err) - require.Nil(t, creator) - require.Contains(t, err.Error(), "only users, service accounts, and access policies may grant permissions") - }) } diff --git a/pkg/storage/unified/apistore/prepare.go b/pkg/storage/unified/apistore/prepare.go index a7f94a69706..3691cb29bbd 100644 --- a/pkg/storage/unified/apistore/prepare.go +++ b/pkg/storage/unified/apistore/prepare.go @@ -100,6 +100,9 @@ func (s *Storage) prepareObjectForStorage(ctx context.Context, newObject runtime if obj.GetFolder() != "" && !s.opts.EnableFolderSupport { return v, apierrors.NewBadRequest(fmt.Sprintf("folders are not supported for: %s", s.gr.String())) } + if s.opts.MaximumNameLength > 0 && len(obj.GetName()) > s.opts.MaximumNameLength { + return v, apierrors.NewBadRequest(fmt.Sprintf("name exceeds maximum length (%d)", s.opts.MaximumNameLength)) + } v.grantPermissions = obj.GetAnnotation(utils.AnnoKeyGrantPermissions) if v.grantPermissions != "" { diff --git a/pkg/storage/unified/apistore/prepare_test.go b/pkg/storage/unified/apistore/prepare_test.go index 3564e183e34..0b36f71117a 100644 --- a/pkg/storage/unified/apistore/prepare_test.go +++ b/pkg/storage/unified/apistore/prepare_test.go @@ -3,6 +3,7 @@ package apistore import ( "context" "math/rand/v2" + "strings" "testing" "time" @@ -35,6 +36,7 @@ func TestPrepareObjectForStorage(t *testing.T) { opts: StorageOptions{ EnableFolderSupport: true, LargeObjectSupport: nil, + MaximumNameLength: 100, }, } @@ -49,10 +51,18 @@ func TestPrepareObjectForStorage(t *testing.T) { }) t.Run("Error on missing name", 
func(t *testing.T) { - dashboard := dashv1.Dashboard{} - _, err := s.prepareObjectForStorage(ctx, dashboard.DeepCopyObject()) + dashboard := &dashv1.Dashboard{} + _, err := s.prepareObjectForStorage(ctx, dashboard) require.Error(t, err) - require.Contains(t, err.Error(), "missing name") + require.ErrorContains(t, err, "missing name") + }) + + t.Run("name is too long", func(t *testing.T) { + dashboard := &dashv1.Dashboard{} + dashboard.Name = strings.Repeat("a", 120) + _, err := s.prepareObjectForStorage(ctx, dashboard) + require.Error(t, err) + require.ErrorContains(t, err, "name exceeds maximum length") }) t.Run("Error on non-empty resource version", func(t *testing.T) { diff --git a/pkg/storage/unified/apistore/store.go b/pkg/storage/unified/apistore/store.go index 8b263fde25b..a6c37c00dab 100644 --- a/pkg/storage/unified/apistore/store.go +++ b/pkg/storage/unified/apistore/store.go @@ -59,6 +59,9 @@ type StorageOptions struct { // Allow writing objects with metadata.annotations[grafana.app/folder] EnableFolderSupport bool + // Some resources should not allow the absolute maximum (254 characters) + MaximumNameLength int + // Add internalID label when missing RequireDeprecatedInternalID bool @@ -290,13 +293,6 @@ func (s *Storage) Delete( if err := preconditions.Check(key, out); err != nil { return err } - - if preconditions.ResourceVersion != nil { - cmd.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64) - if err != nil { - return err - } - } if preconditions.UID != nil { cmd.Uid = string(*preconditions.UID) } @@ -316,6 +312,10 @@ func (s *Storage) Delete( return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_DELETED, key, out, out) } + cmd.ResourceVersion, err = meta.GetResourceVersionInt64() + if err != nil { + return resource.GetError(resource.AsErrorResult(err)) + } rsp, err := s.store.Delete(ctx, cmd) if err != nil { return resource.GetError(resource.AsErrorResult(err)) @@ -533,6 +533,18 @@ func (s *Storage) 
GuaranteedUpdate( if err != nil { return err } + // NOTE: by default, the RV will **not** be set in the preconditions (it is removed here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L187) + // instead, the RV check is done with the object from the request itself. + // + // the object from the request is retrieved in the tryUpdate function (we use the generic k8s store one). this function calls the UpdateObject function here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L653 + // and that will run a series of transformations: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L219 + // + // the specific transformations it runs depends on what type of update it is. + // for patch, the transformers are set here and use the patchBytes from the request: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go#L697 + // for put, it uses the object from the request here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go#L163 + // + // after those transformations, the RV will then be on the object so that the RV check can properly be done here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L662 + // it will be compared to the current object that we pass in below from storage. 
if preconditions != nil && preconditions.ResourceVersion != nil { req.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64) if err != nil { @@ -608,41 +620,45 @@ func (s *Storage) GuaranteedUpdate( } continue } - break - } - v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj) - if err != nil { - return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_MODIFIED, key, updatedObj, destination) - } - - // Only update (for real) if the bytes have changed - var rv uint64 - req.Value = v.raw.Bytes() - if !bytes.Equal(req.Value, existingBytes) { - updateResponse, err := s.store.Update(ctx, req) + v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj) if err != nil { - err = resource.GetError(resource.AsErrorResult(err)) - } else if updateResponse.Error != nil { - err = resource.GetError(updateResponse.Error) + return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_MODIFIED, key, updatedObj, destination) } - // Cleanup secure values - if err = v.finish(ctx, err, s.opts.SecureValues); err != nil { + // Only update (for real) if the bytes have changed + var rv uint64 + req.Value = v.raw.Bytes() + if !bytes.Equal(req.Value, existingBytes) { + req.ResourceVersion = readResponse.ResourceVersion + updateResponse, err := s.store.Update(ctx, req) + if err != nil { + err = resource.GetError(resource.AsErrorResult(err)) + } else if updateResponse.Error != nil { + if attempt < MaxUpdateAttempts && updateResponse.Error.Code == http.StatusConflict { + continue // try the read again + } + err = resource.GetError(updateResponse.Error) + } + + // Cleanup secure values + if err = v.finish(ctx, err, s.opts.SecureValues); err != nil { + return err + } + + rv = uint64(updateResponse.ResourceVersion) + } + + if _, err := s.convertToObject(req.Value, destination); err != nil { return err } - rv = uint64(updateResponse.ResourceVersion) - } - - if _, err := s.convertToObject(req.Value, destination); err != nil { - return 
err - } - - if rv > 0 { - if err := s.versioner.UpdateObject(destination, rv); err != nil { - return err + if rv > 0 { + if err := s.versioner.UpdateObject(destination, rv); err != nil { + return err + } } + return nil } return nil diff --git a/pkg/storage/unified/federated/stats.go b/pkg/storage/unified/federated/stats.go index 40721caa1d0..ce2d6199b2b 100644 --- a/pkg/storage/unified/federated/stats.go +++ b/pkg/storage/unified/federated/stats.go @@ -60,7 +60,7 @@ func (s *LegacyStatsGetter) GetStats(ctx context.Context, in *resourcepb.Resourc group := "sql-fallback" // Legacy alert rule table - err = fn("alert_rule", "org_id=? AND dashboard_uid=?", group, "alertrules", false) + err = fn("alert_rule", "org_id=? AND namespace_uid=?", group, "alertrules", false) if err != nil { return err } diff --git a/pkg/storage/unified/federated/stats_test.go b/pkg/storage/unified/federated/stats_test.go index 3fa1c56a55e..89d50d1da87 100644 --- a/pkg/storage/unified/federated/stats_test.go +++ b/pkg/storage/unified/federated/stats_test.go @@ -78,9 +78,10 @@ func TestIntegrationDirectSQLStats(t *testing.T) { require.NoError(t, err) ruleStore := ngalertstore.SetupStoreForTesting(t, db) + dashboardUID := "test" _, err = ruleStore.InsertAlertRules(context.Background(), ngmodels.NewUserUID(tempUser), []ngmodels.AlertRule{ { - DashboardUID: &folder2UID, + DashboardUID: &dashboardUID, UID: "test", Title: "test", OrgID: 1, @@ -97,7 +98,7 @@ func TestIntegrationDirectSQLStats(t *testing.T) { }, Condition: "ok", Updated: now, - NamespaceUID: "test", + NamespaceUID: folder2UID, ExecErrState: ngmodels.ExecutionErrorState(ngmodels.Alerting), NoDataState: ngmodels.Alerting, IntervalSeconds: 60, diff --git a/pkg/storage/unified/resource/access.go b/pkg/storage/unified/resource/access.go index 161184c7a9e..94cd5b4a801 100644 --- a/pkg/storage/unified/resource/access.go +++ b/pkg/storage/unified/resource/access.go @@ -107,7 +107,7 @@ func NewAuthzLimitedClient(client claims.AccessClient, opts 
AuthzOptions) claims } // Check implements claims.AccessClient. -func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest) (claims.CheckResponse, error) { +func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req claims.CheckRequest, folder string) (claims.CheckResponse, error) { t := time.Now() ctx, span := c.tracer.Start(ctx, "authzLimitedClient.Check", trace.WithAttributes( attribute.String("group", req.Group), @@ -115,7 +115,7 @@ func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req c attribute.String("namespace", req.Namespace), attribute.String("name", req.Name), attribute.String("verb", req.Verb), - attribute.String("folder", req.Folder), + attribute.String("folder", folder), attribute.Bool("fallback_used", FallbackUsed(ctx)), )) defer span.End() @@ -145,7 +145,7 @@ func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req c span.SetAttributes(attribute.Bool("allowed", true)) return claims.CheckResponse{Allowed: true}, nil } - resp, err := c.client.Check(ctx, id, req) + resp, err := c.client.Check(ctx, id, req, folder) if err != nil { c.logger.Error("Check", "group", req.Group, "resource", req.Resource, "error", err, "duration", time.Since(t), "traceid", trace.SpanContextFromContext(ctx).TraceID().String()) c.metrics.errorsTotal.WithLabelValues(req.Group, req.Resource, req.Verb).Inc() diff --git a/pkg/storage/unified/resource/access_test.go b/pkg/storage/unified/resource/access_test.go index e09e40f01c8..5d664aaba74 100644 --- a/pkg/storage/unified/resource/access_test.go +++ b/pkg/storage/unified/resource/access_test.go @@ -34,7 +34,7 @@ func TestAuthzLimitedClient_Check(t *testing.T) { Verb: utils.VerbGet, Namespace: "stacks-1", } - resp, err := client.Check(context.Background(), &identity.StaticRequester{Namespace: "stacks-1"}, req) + resp, err := client.Check(context.Background(), &identity.StaticRequester{Namespace: "stacks-1"}, req, "") 
assert.NoError(t, err) assert.Equal(t, test.expected, resp.Allowed) } @@ -135,7 +135,7 @@ func TestNamespaceMatching(t *testing.T) { // Create a mock auth info with the specified namespace // Test Check method user := &identity.StaticRequester{Namespace: tt.authNamespace} - _, checkErr := client.Check(ctx, user, checkReq) + _, checkErr := client.Check(ctx, user, checkReq, "") // Test Compile method compileReq := authlib.ListRequest{ @@ -199,7 +199,7 @@ func TestNamespaceMatchingFallback(t *testing.T) { // Create a mock auth info with the specified namespace // Test Check method user := &identity.StaticRequester{Namespace: tt.authNamespace} - _, checkErr := client.Check(ctx, user, checkReq) + _, checkErr := client.Check(ctx, user, checkReq, "") // Test Compile method compileReq := authlib.ListRequest{ diff --git a/pkg/storage/unified/resource/bleve_index_metrics.go b/pkg/storage/unified/resource/bleve_index_metrics.go index bdd3f071232..00e08c62389 100644 --- a/pkg/storage/unified/resource/bleve_index_metrics.go +++ b/pkg/storage/unified/resource/bleve_index_metrics.go @@ -20,6 +20,7 @@ type BleveIndexMetrics struct { UpdateLatency prometheus.Histogram UpdatedDocuments prometheus.Summary SearchUpdateWaitTime *prometheus.HistogramVec + RebuildQueueLength prometheus.Gauge } var IndexCreationBuckets = []float64{1, 5, 10, 25, 50, 75, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000} @@ -84,6 +85,10 @@ func ProvideIndexMetrics(reg prometheus.Registerer) *BleveIndexMetrics { NativeHistogramMaxBucketNumber: 160, NativeHistogramMinResetDuration: time.Hour, }, []string{"reason"}), + RebuildQueueLength: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "index_server_rebuild_queue_length", + Help: "Number of indexes waiting for rebuild", + }), } // Initialize labels. 
diff --git a/pkg/storage/unified/resource/bulk.go b/pkg/storage/unified/resource/bulk.go index a0720efa6de..842d6638f5c 100644 --- a/pkg/storage/unified/resource/bulk.go +++ b/pkg/storage/unified/resource/bulk.go @@ -190,7 +190,7 @@ func (s *server) BulkProcess(stream resourcepb.BulkStore_BulkProcessServer) erro Group: k.Group, Resource: k.Resource, Verb: utils.VerbDeleteCollection, - }) + }, "") if err != nil || !rsp.Allowed { return sendAndClose(&resourcepb.BulkResponse{ Error: &resourcepb.ErrorResult{ diff --git a/pkg/storage/unified/resource/errors.go b/pkg/storage/unified/resource/errors.go index 79d3911e470..a0402be1dcb 100644 --- a/pkg/storage/unified/resource/errors.go +++ b/pkg/storage/unified/resource/errors.go @@ -18,8 +18,7 @@ import ( // Package-level errors. var ( - ErrOptimisticLockingFailed = errors.New("optimistic locking failed") - ErrNotImplementedYet = errors.New("not implemented yet") + ErrNotImplementedYet = errors.New("not implemented yet") ) var ( @@ -31,6 +30,12 @@ var ( Code: http.StatusConflict, }, } + + ErrOptimisticLockingFailed = resourcepb.ErrorResult{ + Code: http.StatusConflict, + Reason: "optimistic locking failed", + Message: "requested RV does not match saved RV", + } ) func NewBadRequestError(msg string) *resourcepb.ErrorResult { diff --git a/pkg/storage/unified/resource/keys.go b/pkg/storage/unified/resource/keys.go index 914a9429365..a9a5c4cabeb 100644 --- a/pkg/storage/unified/resource/keys.go +++ b/pkg/storage/unified/resource/keys.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/grafana/grafana/pkg/apimachinery/validation" "github.com/grafana/grafana/pkg/storage/unified/resourcepb" ) @@ -17,17 +18,17 @@ func verifyRequestKey(key *resourcepb.ResourceKey) *resourcepb.ErrorResult { if key.Resource == "" { return NewBadRequestError("request key is missing resource") } - if err := validateName(key.Name); err != nil { - return NewBadRequestError(fmt.Sprintf("name '%s' is invalid: '%s'", key.Name, err)) + if err := 
validation.IsValidNamespace(key.Namespace); err != nil { + return NewBadRequestError(err[0]) } - if err := validateNamespace(key.Namespace); err != nil { - return NewBadRequestError(fmt.Sprintf("namespace '%s' is invalid: '%s'", key.Namespace, err)) + if err := validation.IsValidGroup(key.Group); err != nil { + return NewBadRequestError(err[0]) } - if err := validateGroup(key.Group); err != nil { - return NewBadRequestError(fmt.Sprintf("group '%s' is invalid: '%s'", key.Group, err)) + if err := validation.IsValidateResource(key.Resource); err != nil { + return NewBadRequestError(err[0]) } - if err := validateResource(key.Resource); err != nil { - return NewBadRequestError(fmt.Sprintf("resource '%s' is invalid: '%s'", key.Resource, err)) + if err := validation.IsValidGrafanaName(key.Name); err != nil { + return NewBadRequestError(err[0]) } return nil } diff --git a/pkg/storage/unified/resource/keys_test.go b/pkg/storage/unified/resource/keys_test.go index f6a894e3a1c..c53678cdcb6 100644 --- a/pkg/storage/unified/resource/keys_test.go +++ b/pkg/storage/unified/resource/keys_test.go @@ -81,7 +81,7 @@ func TestVerifyRequestKey(t *testing.T) { invalidNamespace := "(((((default" invalidName := " " // only spaces - namespaceTooLong := strings.Repeat("a", MaxNameLength+1) + namespaceTooLong := strings.Repeat("a", 61) nameTooLong := strings.Repeat("a", 300) tests := []struct { diff --git a/pkg/storage/unified/resource/search.go b/pkg/storage/unified/resource/search.go index fbf70eebe4f..50541be922f 100644 --- a/pkg/storage/unified/resource/search.go +++ b/pkg/storage/unified/resource/search.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/Masterminds/semver" "github.com/hashicorp/golang-lru/v2/expirable" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -24,6 +25,7 @@ import ( folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1" "github.com/grafana/grafana/pkg/infra/tracing" 
"github.com/grafana/grafana/pkg/storage/unified/resourcepb" + "github.com/grafana/grafana/pkg/util/debouncer" ) const maxBatchSize = 1000 @@ -61,6 +63,11 @@ type BulkIndexRequest struct { ResourceVersion int64 } +type IndexBuildInfo struct { + BuildTime time.Time // Timestamp when the index was built. This value doesn't change on subsequent index updates. + BuildVersion *semver.Version // Grafana version used when originally building the index. This value doesn't change on subsequent index updates. +} + type ResourceIndex interface { // BulkIndex allows for multiple index actions to be performed in a single call. // The order of the items is guaranteed to be the same as the input @@ -81,7 +88,10 @@ type ResourceIndex interface { // UpdateIndex updates the index with the latest data (using update function provided when index was built) to guarantee strong consistency during the search. // Returns RV to which index was updated. - UpdateIndex(ctx context.Context, reason string) (int64, error) + UpdateIndex(ctx context.Context) (int64, error) + + // BuildInfo returns build information about the index. + BuildInfo() (IndexBuildInfo, error) } type BuildFn func(index ResourceIndex) (int64, error) @@ -92,7 +102,7 @@ type UpdateFn func(context context.Context, index ResourceIndex, sinceRV int64) // SearchBackend contains the technology specific logic to support search type SearchBackend interface { // GetIndex returns existing index, or nil. - GetIndex(ctx context.Context, key NamespacedResource) (ResourceIndex, error) + GetIndex(key NamespacedResource) ResourceIndex // BuildIndex builds an index from scratch. // Depending on the size, the backend may choose different options (eg: memory vs disk). @@ -112,6 +122,9 @@ type SearchBackend interface { // TotalDocs returns the total number of documents across all indexes. TotalDocs() int64 + + // GetOpenIndexes returns the list of indexes that are currently open. 
+ GetOpenIndexes() []NamespacedResource } const tracingPrexfixSearch = "unified_search." @@ -132,8 +145,17 @@ type searchSupport struct { buildIndex singleflight.Group - // periodic rebuilding of the indexes to keep usage insights up to date - rebuildInterval time.Duration + // since usage insights is not in unified storage, we need to periodically rebuild the index + // to make sure these data points are up to date. + dashboardIndexMaxAge time.Duration + maxIndexAge time.Duration + minBuildVersion *semver.Version + + bgTaskWg sync.WaitGroup + bgTaskCancel func() + + rebuildQueue *debouncer.Queue[rebuildRequest] + rebuildWorkers int } var ( @@ -150,8 +172,12 @@ func newSearchSupport(opts SearchOptions, storage StorageBackend, access types.A return nil, fmt.Errorf("missing tracer") } - if opts.WorkerThreads < 1 { - opts.WorkerThreads = 1 + if opts.InitWorkerThreads < 1 { + opts.InitWorkerThreads = 1 + } + + if opts.IndexRebuildWorkers < 1 { + opts.IndexRebuildWorkers = 1 } if ownsIndexFn == nil { @@ -161,18 +187,24 @@ func newSearchSupport(opts SearchOptions, storage StorageBackend, access types.A } support = &searchSupport{ - access: access, - tracer: tracer, - storage: storage, - search: opts.Backend, - log: slog.Default().With("logger", "resource-search"), - initWorkers: opts.WorkerThreads, - initMinSize: opts.InitMinCount, - indexMetrics: indexMetrics, - rebuildInterval: opts.RebuildInterval, - ownsIndexFn: ownsIndexFn, + access: access, + tracer: tracer, + storage: storage, + search: opts.Backend, + log: slog.Default().With("logger", "resource-search"), + initWorkers: opts.InitWorkerThreads, + rebuildWorkers: opts.IndexRebuildWorkers, + initMinSize: opts.InitMinCount, + indexMetrics: indexMetrics, + ownsIndexFn: ownsIndexFn, + + dashboardIndexMaxAge: opts.DashboardIndexMaxAge, + maxIndexAge: opts.MaxIndexAge, + minBuildVersion: opts.MinBuildVersion, } + support.rebuildQueue = debouncer.NewQueue(combineRebuildRequests) + info, err := 
opts.Resources.GetDocumentBuilders() if err != nil { return nil, err @@ -186,6 +218,27 @@ func newSearchSupport(opts SearchOptions, storage StorageBackend, access types.A return support, err } +func combineRebuildRequests(a, b rebuildRequest) (c rebuildRequest, ok bool) { + if a.NamespacedResource != b.NamespacedResource { + // We can only combine requests for the same keys. + return rebuildRequest{}, false + } + + ret := a + + // Using higher "min build version" is stricter condition, and causes more indexes to be rebuilt. + if a.minBuildVersion == nil || (b.minBuildVersion != nil && b.minBuildVersion.GreaterThan(a.minBuildVersion)) { + ret.minBuildVersion = b.minBuildVersion + } + + // Using higher "min build time" is stricter condition, and causes more indexes to be rebuilt. + if a.minBuildTime.IsZero() || (!b.minBuildTime.IsZero() && b.minBuildTime.After(a.minBuildTime)) { + ret.minBuildTime = b.minBuildTime + } + + return ret, true +} + func (s *searchSupport) ListManagedObjects(ctx context.Context, req *resourcepb.ListManagedObjectsRequest) (*resourcepb.ListManagedObjectsResponse, error) { if req.NextPageToken != "" { return &resourcepb.ListManagedObjectsResponse{ @@ -401,7 +454,7 @@ func (s *searchSupport) GetStats(ctx context.Context, req *resourcepb.ResourceSt return rsp, nil } -func (s *searchSupport) buildIndexes(ctx context.Context, rebuild bool) (int, error) { +func (s *searchSupport) buildIndexes(ctx context.Context) (int, error) { totalBatchesIndexed := 0 group := errgroup.Group{} group.SetLimit(s.initWorkers) @@ -412,11 +465,6 @@ func (s *searchSupport) buildIndexes(ctx context.Context, rebuild bool) (int, er } for _, info := range stats { - // only periodically rebuild the dashboard index, specifically to update the usage insights data - if rebuild && info.Resource != dashboardv1.DASHBOARD_RESOURCE { - continue - } - own, err := s.ownsIndexFn(info.NamespacedResource) if err != nil { s.log.Warn("failed to check index ownership, building index", 
"namespace", info.Namespace, "group", info.Group, "resource", info.Resource, "error", err) @@ -426,18 +474,11 @@ func (s *searchSupport) buildIndexes(ctx context.Context, rebuild bool) (int, er } group.Go(func() error { - if rebuild { - // we need to clear the cache to make sure we get the latest usage insights data - s.builders.clearNamespacedCache(info.NamespacedResource) - } totalBatchesIndexed++ - s.log.Debug("building index", "namespace", info.Namespace, "group", info.Group, "resource", info.Resource, "rebuild", rebuild) + s.log.Debug("building index", "namespace", info.Namespace, "group", info.Group, "resource", info.Resource) reason := "init" - if rebuild { - reason = "rebuild" - } - _, err := s.build(ctx, info.NamespacedResource, info.Count, reason, rebuild) + _, err := s.build(ctx, info.NamespacedResource, info.Count, reason, false) return err }) } @@ -457,30 +498,41 @@ func (s *searchSupport) init(ctx context.Context) error { defer span.End() start := time.Now().Unix() - totalBatchesIndexed, err := s.buildIndexes(ctx, false) + totalBatchesIndexed, err := s.buildIndexes(ctx) if err != nil { return err } span.AddEvent("namespaces indexed", trace.WithAttributes(attribute.Int("namespaced_indexed", totalBatchesIndexed))) - // since usage insights is not in unified storage, we need to periodically rebuild the index - // to make sure these data points are up to date. 
- if s.rebuildInterval > 0 { - go s.startPeriodicRebuild(origCtx) + subctx, cancel := context.WithCancel(origCtx) + + s.bgTaskCancel = cancel + for i := 0; i < s.rebuildWorkers; i++ { + s.bgTaskWg.Add(1) + go s.runIndexRebuilder(subctx) } + s.bgTaskWg.Add(1) + go s.runPeriodicScanForIndexesToRebuild(subctx) + end := time.Now().Unix() s.log.Info("search index initialized", "duration_secs", end-start, "total_docs", s.search.TotalDocs()) return nil } -func (s *searchSupport) startPeriodicRebuild(ctx context.Context) { - ticker := time.NewTicker(s.rebuildInterval) - defer ticker.Stop() +func (s *searchSupport) stop() { + // Stop background tasks. + s.bgTaskCancel() + s.bgTaskWg.Wait() +} - s.log.Info("starting periodic index rebuild", "interval", s.rebuildInterval) +func (s *searchSupport) runPeriodicScanForIndexesToRebuild(ctx context.Context) { + defer s.bgTaskWg.Done() + + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() for { select { @@ -488,35 +540,155 @@ func (s *searchSupport) startPeriodicRebuild(ctx context.Context) { s.log.Info("stopping periodic index rebuild due to context cancellation") return case <-ticker.C: - s.log.Info("starting periodic index rebuild") - if err := s.rebuildDashboardIndexes(ctx); err != nil { - s.log.Error("error during periodic index rebuild", "error", err) - } else { - s.log.Info("periodic index rebuild completed successfully") + s.findIndexesToRebuild(time.Now()) + } + } +} + +func (s *searchSupport) findIndexesToRebuild(now time.Time) { + // Check all open indexes and see if any of them need to be rebuilt. + // This is done periodically to make sure that the indexes are up to date. + + keys := s.search.GetOpenIndexes() + for _, key := range keys { + idx := s.search.GetIndex(key) + if idx == nil { + // This can happen if index was closed in the meantime. 
+ continue + } + + maxAge := s.maxIndexAge + if key.Resource == dashboardv1.DASHBOARD_RESOURCE { + maxAge = s.dashboardIndexMaxAge + } + + var minBuildTime time.Time + if maxAge > 0 { + minBuildTime = now.Add(-maxAge) + } + + bi, err := idx.BuildInfo() + if err != nil { + s.log.Error("failed to get build info for index to rebuild", "key", key, "error", err) + continue + } + + if shouldRebuildIndex(s.minBuildVersion, bi, minBuildTime, nil) { + s.rebuildQueue.Add(rebuildRequest{ + NamespacedResource: key, + minBuildTime: minBuildTime, + minBuildVersion: s.minBuildVersion, + }) + + if s.indexMetrics != nil { + s.indexMetrics.RebuildQueueLength.Set(float64(s.rebuildQueue.Len())) } } } } -func (s *searchSupport) rebuildDashboardIndexes(ctx context.Context) error { - ctx, span := s.tracer.Start(ctx, tracingPrexfixSearch+"RebuildDashboardIndexes") +// runIndexRebuilder is a goroutine waiting for rebuild requests, and rebuilds indexes specified in those requests. +// Rebuild requests can be generated periodically (if configured), or after new documents have been imported into the storage with old RVs. 
+func (s *searchSupport) runIndexRebuilder(ctx context.Context) { + defer s.bgTaskWg.Done() + + for { + req, err := s.rebuildQueue.Next(ctx) + if err != nil { + s.log.Info("index rebuilder stopped", "error", err) + return + } + + if s.indexMetrics != nil { + s.indexMetrics.RebuildQueueLength.Set(float64(s.rebuildQueue.Len())) + } + + s.rebuildIndex(ctx, req) + } +} + +func (s *searchSupport) rebuildIndex(ctx context.Context, req rebuildRequest) { + ctx, span := s.tracer.Start(ctx, tracingPrexfixSearch+"RebuildIndex") defer span.End() - start := time.Now() - s.log.Info("rebuilding all search indexes") + l := s.log.With("namespace", req.Namespace, "group", req.Group, "resource", req.Resource) - totalBatchesIndexed, err := s.buildIndexes(ctx, true) - if err != nil { - return fmt.Errorf("failed to rebuild dashboard indexes: %w", err) + idx := s.search.GetIndex(req.NamespacedResource) + if idx == nil { + span.AddEvent("index not found") + l.Error("index not found") + return } - end := time.Now() - duration := end.Sub(start) - s.log.Info("completed rebuilding all dashboard search indexes", - "duration", duration, - "rebuilt_indexes", totalBatchesIndexed, - "total_docs", s.search.TotalDocs()) - return nil + bi, err := idx.BuildInfo() + if err != nil { + span.RecordError(err) + l.Error("failed to get build info for index to rebuild", "error", err) + } + + rebuild := shouldRebuildIndex(req.minBuildVersion, bi, req.minBuildTime, l) + if !rebuild { + span.AddEvent("index not rebuilt") + l.Info("index doesn't need to be rebuilt") + return + } + + if req.Resource == dashboardv1.DASHBOARD_RESOURCE { + // we need to clear the cache to make sure we get the latest usage insights data + s.builders.clearNamespacedCache(req.NamespacedResource) + } + + // Get the correct value of size + RV for building the index. This is important for our Bleve + // backend to decide whether to build index in-memory or as file-based. 
+ stats, err := s.storage.GetResourceStats(ctx, req.Namespace, 0) + if err != nil { + span.RecordError(fmt.Errorf("failed to get resource stats: %w", err)) + l.Error("failed to get resource stats", "error", err) + return + } + + size := int64(0) + for _, stat := range stats { + if stat.Namespace == req.Namespace && stat.Group == req.Group && stat.Resource == req.Resource { + size = stat.Count + break + } + } + + _, err = s.build(ctx, req.NamespacedResource, size, "rebuild", true) + if err != nil { + span.RecordError(err) + l.Error("failed to rebuild index", "error", err) + } +} + +func shouldRebuildIndex(minBuildVersion *semver.Version, buildInfo IndexBuildInfo, minBuildTime time.Time, rebuildLogger *slog.Logger) bool { + if !minBuildTime.IsZero() { + if buildInfo.BuildTime.IsZero() || buildInfo.BuildTime.Before(minBuildTime) { + if rebuildLogger != nil { + rebuildLogger.Info("index build time is before minBuildTime, rebuilding the index", "indexBuildTime", buildInfo.BuildTime, "minBuildTime", minBuildTime) + } + return true + } + } + + if minBuildVersion != nil { + if buildInfo.BuildVersion == nil || buildInfo.BuildVersion.Compare(minBuildVersion) < 0 { + if rebuildLogger != nil { + rebuildLogger.Info("index build version is before minBuildVersion, rebuilding the index", "indexBuildVersion", buildInfo.BuildVersion, "minBuildVersion", minBuildVersion) + } + return true + } + } + + return false +} + +type rebuildRequest struct { + NamespacedResource + + minBuildTime time.Time // if not zero, only rebuild index if it has been built before this timestamp + minBuildVersion *semver.Version // if not nil, only rebuild index with build version older than this. 
} func (s *searchSupport) getOrCreateIndex(ctx context.Context, key NamespacedResource, reason string) (ResourceIndex, error) { @@ -533,11 +705,7 @@ func (s *searchSupport) getOrCreateIndex(ctx context.Context, key NamespacedReso attribute.String("namespace", key.Namespace), ) - idx, err := s.search.GetIndex(ctx, key) - if err != nil { - return nil, tracing.Error(span, err) - } - + idx := s.search.GetIndex(key) if idx == nil { span.AddEvent("Building index") ch := s.buildIndex.DoChan(key.String(), func() (interface{}, error) { @@ -547,8 +715,8 @@ func (s *searchSupport) getOrCreateIndex(ctx context.Context, key NamespacedReso // Recheck if some other goroutine managed to build an index in the meantime. // (That is, it finished running this function and stored the index into the cache) - idx, err := s.search.GetIndex(ctx, key) - if err == nil && idx != nil { + idx := s.search.GetIndex(key) + if idx != nil { return idx, nil } @@ -590,7 +758,7 @@ func (s *searchSupport) getOrCreateIndex(ctx context.Context, key NamespacedReso span.AddEvent("Updating index") start := time.Now() - rv, err := idx.UpdateIndex(ctx, reason) + rv, err := idx.UpdateIndex(ctx) if err != nil { return nil, tracing.Error(span, fmt.Errorf("failed to update index to guarantee strong consistency: %w", err)) } diff --git a/pkg/storage/unified/resource/search_test.go b/pkg/storage/unified/resource/search_test.go index be34e3a5724..34c9ee6927c 100644 --- a/pkg/storage/unified/resource/search_test.go +++ b/pkg/storage/unified/resource/search_test.go @@ -9,11 +9,16 @@ import ( "testing" "time" + "log/slog" + + "github.com/Masterminds/semver" "github.com/grafana/authlib/types" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/trace/noop" + dashboardv1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1" + "github.com/grafana/grafana/pkg/infra/log/logtest" "github.com/grafana/grafana/pkg/storage/unified/resourcepb" ) @@ -26,7 +31,13 @@ type 
MockResourceIndex struct { updateIndexError error updateIndexMu sync.Mutex - updateIndexCalls []string + updateIndexCalls int + + buildInfo IndexBuildInfo +} + +func (m *MockResourceIndex) BuildInfo() (IndexBuildInfo, error) { + return m.buildInfo, nil } func (m *MockResourceIndex) BulkIndex(req *BulkIndexRequest) error { @@ -54,11 +65,11 @@ func (m *MockResourceIndex) ListManagedObjects(ctx context.Context, req *resourc return args.Get(0).(*resourcepb.ListManagedObjectsResponse), args.Error(1) } -func (m *MockResourceIndex) UpdateIndex(ctx context.Context, reason string) (int64, error) { +func (m *MockResourceIndex) UpdateIndex(_ context.Context) (int64, error) { m.updateIndexMu.Lock() defer m.updateIndexMu.Unlock() - m.updateIndexCalls = append(m.updateIndexCalls, reason) + m.updateIndexCalls++ return 0, m.updateIndexError } @@ -120,10 +131,11 @@ func (m *mockStorageBackend) ListModifiedSince(ctx context.Context, key Namespac // mockSearchBackend implements SearchBackend for testing with tracking capabilities type mockSearchBackend struct { - mu sync.Mutex - buildIndexCalls []buildIndexCall - buildEmptyIndexCalls []buildEmptyIndexCall - cache map[NamespacedResource]ResourceIndex + openIndexes []NamespacedResource + + mu sync.Mutex + buildIndexCalls []buildIndexCall + cache map[NamespacedResource]ResourceIndex } type buildIndexCall struct { @@ -132,16 +144,10 @@ type buildIndexCall struct { fields SearchableDocumentFields } -type buildEmptyIndexCall struct { - key NamespacedResource - size int64 // should be 0 for empty indexes - fields SearchableDocumentFields -} - -func (m *mockSearchBackend) GetIndex(ctx context.Context, key NamespacedResource) (ResourceIndex, error) { +func (m *mockSearchBackend) GetIndex(key NamespacedResource) ResourceIndex { m.mu.Lock() defer m.mu.Unlock() - return m.cache[key], nil + return m.cache[key] } func (m *mockSearchBackend) BuildIndex(ctx context.Context, key NamespacedResource, size int64, fields SearchableDocumentFields, reason 
string, builder BuildFn, updater UpdateFn, rebuild bool) (ResourceIndex, error) { @@ -165,21 +171,11 @@ func (m *mockSearchBackend) BuildIndex(ctx context.Context, key NamespacedResour // Determine if this is an empty index based on size // Empty indexes are characterized by size == 0 - if size == 0 { - // This is an empty index (buildEmptyIndex was called) - m.buildEmptyIndexCalls = append(m.buildEmptyIndexCalls, buildEmptyIndexCall{ - key: key, - size: size, - fields: fields, - }) - } else { - // This is a normal index (build was called) - m.buildIndexCalls = append(m.buildIndexCalls, buildIndexCall{ - key: key, - size: size, - fields: fields, - }) - } + m.buildIndexCalls = append(m.buildIndexCalls, buildIndexCall{ + key: key, + size: size, + fields: fields, + }) return index, nil } @@ -188,6 +184,10 @@ func (m *mockSearchBackend) TotalDocs() int64 { return 0 } +func (m *mockSearchBackend) GetOpenIndexes() []NamespacedResource { + return m.openIndexes +} + func TestSearchGetOrCreateIndex(t *testing.T) { // Setup mock implementations storage := &mockStorageBackend{ @@ -195,22 +195,17 @@ func TestSearchGetOrCreateIndex(t *testing.T) { {NamespacedResource: NamespacedResource{Namespace: "ns", Group: "group", Resource: "resource"}, Count: 50, ResourceVersion: 11111111}, }, } - search := &mockSearchBackend{ - buildIndexCalls: []buildIndexCall{}, - buildEmptyIndexCalls: []buildEmptyIndexCall{}, - } + search := &mockSearchBackend{} supplier := &TestDocumentBuilderSupplier{ GroupsResources: map[string]string{ "group": "resource", }, } - // Create search support with the specified initMaxSize opts := SearchOptions{ - Backend: search, - Resources: supplier, - WorkerThreads: 1, - InitMinCount: 1, // set min count to default for this test + Backend: search, + Resources: supplier, + InitMinCount: 1, // set min count to default for this test } support, err := newSearchSupport(opts, storage, nil, nil, noop.NewTracerProvider().Tracer("test"), nil, nil) @@ -250,9 +245,6 @@ func 
TestSearchGetOrCreateIndexWithIndexUpdate(t *testing.T) { } failedErr := fmt.Errorf("failed to update index") search := &mockSearchBackend{ - buildIndexCalls: []buildIndexCall{}, - buildEmptyIndexCalls: []buildEmptyIndexCall{}, - cache: map[NamespacedResource]ResourceIndex{ {Namespace: "ns", Group: "group", Resource: "bad"}: &MockResourceIndex{ updateIndexError: failedErr, @@ -265,12 +257,10 @@ func TestSearchGetOrCreateIndexWithIndexUpdate(t *testing.T) { }, } - // Create search support with the specified initMaxSize opts := SearchOptions{ - Backend: search, - Resources: supplier, - WorkerThreads: 1, - InitMinCount: 1, // set min count to default for this test + Backend: search, + Resources: supplier, + InitMinCount: 1, // set min count to default for this test } // Enable searchAfterWrite @@ -281,24 +271,24 @@ func TestSearchGetOrCreateIndexWithIndexUpdate(t *testing.T) { idx, err := support.getOrCreateIndex(context.Background(), NamespacedResource{Namespace: "ns", Group: "group", Resource: "resource"}, "initial call") require.NoError(t, err) require.NotNil(t, idx) - checkMockIndexUpdateCalls(t, idx, []string{"initial call"}) + checkMockIndexUpdateCalls(t, idx, 1) idx, err = support.getOrCreateIndex(context.Background(), NamespacedResource{Namespace: "ns", Group: "group", Resource: "resource"}, "second call") require.NoError(t, err) require.NotNil(t, idx) - checkMockIndexUpdateCalls(t, idx, []string{"initial call", "second call"}) + checkMockIndexUpdateCalls(t, idx, 2) idx, err = support.getOrCreateIndex(context.Background(), NamespacedResource{Namespace: "ns", Group: "group", Resource: "bad"}, "call to bad index") require.ErrorIs(t, err, failedErr) require.Nil(t, idx) } -func checkMockIndexUpdateCalls(t *testing.T, idx ResourceIndex, strings []string) { +func checkMockIndexUpdateCalls(t *testing.T, idx ResourceIndex, calls int) { mi, ok := idx.(*MockResourceIndex) require.True(t, ok) mi.updateIndexMu.Lock() defer mi.updateIndexMu.Unlock() - require.Equal(t, 
strings, mi.updateIndexCalls) + require.Equal(t, calls, mi.updateIndexCalls) } func TestSearchGetOrCreateIndexWithCancellation(t *testing.T) { @@ -317,12 +307,10 @@ func TestSearchGetOrCreateIndexWithCancellation(t *testing.T) { }, } - // Create search support with the specified initMaxSize opts := SearchOptions{ - Backend: search, - Resources: supplier, - WorkerThreads: 1, - InitMinCount: 1, // set min count to default for this test + Backend: search, + Resources: supplier, + InitMinCount: 1, // set min count to default for this test } support, err := newSearchSupport(opts, storage, nil, nil, noop.NewTracerProvider().Tracer("test"), nil, nil) @@ -345,8 +333,8 @@ func TestSearchGetOrCreateIndexWithCancellation(t *testing.T) { // Wait until new index is put into cache. require.Eventually(t, func() bool { - idx, err := support.search.GetIndex(ctx, key) - return err == nil && idx != nil + idx := support.search.GetIndex(key) + return idx != nil }, 1*time.Second, 100*time.Millisecond, "Indexing finishes despite context cancellation") // Second call to getOrCreateIndex returns index immediately, even if context is canceled, as the index is now ready and cached. 
@@ -359,10 +347,10 @@ type slowSearchBackendWithCache struct { wg sync.WaitGroup } -func (m *slowSearchBackendWithCache) GetIndex(ctx context.Context, key NamespacedResource) (ResourceIndex, error) { +func (m *slowSearchBackendWithCache) GetIndex(key NamespacedResource) ResourceIndex { m.mu.Lock() defer m.mu.Unlock() - return m.cache[key], nil + return m.cache[key] } func (m *slowSearchBackendWithCache) BuildIndex(ctx context.Context, key NamespacedResource, size int64, fields SearchableDocumentFields, reason string, builder BuildFn, updater UpdateFn, rebuild bool) (ResourceIndex, error) { @@ -381,3 +369,348 @@ func (m *slowSearchBackendWithCache) BuildIndex(ctx context.Context, key Namespa } return idx, nil } + +func TestCombineBuildRequests(t *testing.T) { + type testcase struct { + a, b rebuildRequest + exp rebuildRequest + expOK bool + } + + now := time.Now() + for name, tc := range map[string]testcase{ + "mismatched resource": { + a: rebuildRequest{NamespacedResource: NamespacedResource{Namespace: "a", Group: "a", Resource: "a"}}, + b: rebuildRequest{NamespacedResource: NamespacedResource{Namespace: "b", Group: "b", Resource: "b"}}, + expOK: false, + }, + "equal values": { + a: rebuildRequest{minBuildTime: now, minBuildVersion: semver.MustParse("10.15.20")}, + b: rebuildRequest{minBuildTime: now, minBuildVersion: semver.MustParse("10.15.20")}, + expOK: true, + exp: rebuildRequest{minBuildTime: now, minBuildVersion: semver.MustParse("10.15.20")}, + }, + "empty field": { + a: rebuildRequest{minBuildTime: now}, + b: rebuildRequest{minBuildVersion: semver.MustParse("10.15.20")}, + expOK: true, + exp: rebuildRequest{minBuildTime: now, minBuildVersion: semver.MustParse("10.15.20")}, + }, + "use max build time": { + a: rebuildRequest{minBuildTime: now.Add(2 * time.Hour)}, + b: rebuildRequest{minBuildTime: now.Add(-time.Hour)}, + expOK: true, + exp: rebuildRequest{minBuildTime: now.Add(2 * time.Hour)}, + }, + "use max version": { + a: rebuildRequest{minBuildVersion: 
semver.MustParse("12.10.99")}, + b: rebuildRequest{minBuildVersion: semver.MustParse("10.15.20")}, + expOK: true, + exp: rebuildRequest{minBuildVersion: semver.MustParse("12.10.99")}, + }, + "both fields": { + a: rebuildRequest{minBuildTime: now.Add(2 * time.Hour), minBuildVersion: semver.MustParse("12.10.99")}, + b: rebuildRequest{minBuildTime: now.Add(-time.Hour), minBuildVersion: semver.MustParse("10.15.20")}, + expOK: true, + exp: rebuildRequest{minBuildTime: now.Add(2 * time.Hour), minBuildVersion: semver.MustParse("12.10.99")}, + }, + } { + t.Run(name, func(t *testing.T) { + res1, ok := combineRebuildRequests(tc.a, tc.b) + require.Equal(t, tc.expOK, ok) + if ok { + require.Equal(t, tc.exp, res1) + } + + // commutativity + res2, ok := combineRebuildRequests(tc.b, tc.a) + require.Equal(t, tc.expOK, ok) + if ok { + require.Equal(t, tc.exp, res2) + } + }) + } +} + +func TestShouldRebuildIndex(t *testing.T) { + type testcase struct { + buildInfo IndexBuildInfo + minTime time.Time + minBuildVersion *semver.Version + + expected bool + } + + now := time.Now() + + for name, tc := range map[string]testcase{ + "empty build info, with no rebuild conditions": { + buildInfo: IndexBuildInfo{}, + expected: false, + }, + "empty build info, with minTime": { + buildInfo: IndexBuildInfo{}, + minTime: now, + expected: true, + }, + "empty build info, with minVersion": { + buildInfo: IndexBuildInfo{}, + minBuildVersion: semver.MustParse("10.15.20"), + expected: true, + }, + "build time before min time": { + buildInfo: IndexBuildInfo{BuildTime: now.Add(-2 * time.Hour)}, + minTime: now, + expected: true, + }, + "build time after min time": { + buildInfo: IndexBuildInfo{BuildTime: now.Add(2 * time.Hour)}, + minTime: now, + expected: false, + }, + "build version before min version": { + buildInfo: IndexBuildInfo{BuildVersion: semver.MustParse("10.15.19")}, + minBuildVersion: semver.MustParse("10.15.20"), + expected: true, + }, + "build version after min version": { + buildInfo: 
IndexBuildInfo{BuildVersion: semver.MustParse("11.0.0")}, + minBuildVersion: semver.MustParse("10.15.20"), + expected: false, + }, + } { + t.Run(name, func(t *testing.T) { + res := shouldRebuildIndex(tc.minBuildVersion, tc.buildInfo, tc.minTime, slog.New(&logtest.NopHandler{})) + require.Equal(t, tc.expected, res) + }) + } +} + +func TestFindIndexesForRebuild(t *testing.T) { + storage := &mockStorageBackend{ + resourceStats: []ResourceStats{ + {NamespacedResource: NamespacedResource{Namespace: "ns", Group: "group", Resource: "resource"}, Count: 50, ResourceVersion: 11111111}, + }, + } + + now := time.Now() + + search := &mockSearchBackend{ + openIndexes: []NamespacedResource{ + {Namespace: "resource-2h-v5", Group: "group", Resource: "folder"}, + {Namespace: "resource-2h-v6", Group: "group", Resource: "folder"}, + {Namespace: "resource-10h-v5", Group: "group", Resource: "folder"}, + {Namespace: "resource-10h-v6", Group: "group", Resource: "folder"}, + {Namespace: "resource-v5", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, + {Namespace: "resource-v6", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, + {Namespace: "resource-2h-v5", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, + {Namespace: "resource-2h-v6", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, + + // We report this index as open, but it's really not. This can happen if index expires between the call + // to GetOpenIndexes and the call to GetIndex. 
+ {Namespace: "ns", Group: "group", Resource: "missing"}, + }, + + cache: map[NamespacedResource]ResourceIndex{ + // To be rebuilt because of minVersion + {Namespace: "resource-2h-v5", Group: "group", Resource: "folder"}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now.Add(-2 * time.Hour), BuildVersion: semver.MustParse("5.0.0")}, + }, + + // Not rebuilt + {Namespace: "resource-2h-v6", Group: "group", Resource: "folder"}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now.Add(-2 * time.Hour), BuildVersion: semver.MustParse("6.0.0")}, + }, + + // To be rebuilt because of minTime + {Namespace: "resource-10h-v5", Group: "group", Resource: "folder"}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now.Add(-10 * time.Hour), BuildVersion: semver.MustParse("5.0.0")}, + }, + + // To be rebuilt because of minTime + {Namespace: "resource-10h-v6", Group: "group", Resource: "folder"}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now.Add(-10 * time.Hour), BuildVersion: semver.MustParse("6.0.0")}, + }, + + // To be rebuilt because of minVersion + {Namespace: "resource-v5", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now, BuildVersion: semver.MustParse("5.0.0")}, + }, + + // Not rebuilt + {Namespace: "resource-v6", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now, BuildVersion: semver.MustParse("6.0.0")}, + }, + + // To be rebuilt because of minTime (1h for dashboards) + {Namespace: "resource-2h-v5", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now.Add(-2 * time.Hour), BuildVersion: semver.MustParse("5.0.0")}, + }, + + // To be rebuilt because of minTime (1h for dashboards) + {Namespace: "resource-2h-v6", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}: &MockResourceIndex{ + buildInfo: 
IndexBuildInfo{BuildTime: now.Add(-2 * time.Hour), BuildVersion: semver.MustParse("6.0.0")}, + }, + }, + } + + supplier := &TestDocumentBuilderSupplier{ + GroupsResources: map[string]string{ + "group": "resource", + }, + } + + opts := SearchOptions{ + Backend: search, + Resources: supplier, + + DashboardIndexMaxAge: 1 * time.Hour, + MaxIndexAge: 5 * time.Hour, + MinBuildVersion: semver.MustParse("5.5.5"), + } + + support, err := newSearchSupport(opts, storage, nil, nil, noop.NewTracerProvider().Tracer("test"), nil, nil) + require.NoError(t, err) + require.NotNil(t, support) + + support.findIndexesToRebuild(now) + require.Equal(t, 6, support.rebuildQueue.Len()) + + now5m := now.Add(5 * time.Minute) + + // Running findIndexesToRebuild again should not add any new indexes to the rebuild queue, and all existing + // ones should be "combined" with new ones (this will "bump" minBuildTime) + support.findIndexesToRebuild(now5m) + require.Equal(t, 6, support.rebuildQueue.Len()) + + // Values that we expect to find in rebuild requests. 
+ minBuildVersion := semver.MustParse("5.5.5") + minBuildTime := now5m.Add(-5 * time.Hour) + minBuildTimeDashboard := now5m.Add(-1 * time.Hour) + + vals := support.rebuildQueue.Elements() + require.ElementsMatch(t, vals, []rebuildRequest{ + {NamespacedResource: NamespacedResource{Namespace: "resource-2h-v5", Group: "group", Resource: "folder"}, minBuildVersion: minBuildVersion, minBuildTime: minBuildTime}, + {NamespacedResource: NamespacedResource{Namespace: "resource-10h-v5", Group: "group", Resource: "folder"}, minBuildVersion: minBuildVersion, minBuildTime: minBuildTime}, + {NamespacedResource: NamespacedResource{Namespace: "resource-10h-v6", Group: "group", Resource: "folder"}, minBuildVersion: minBuildVersion, minBuildTime: minBuildTime}, + + {NamespacedResource: NamespacedResource{Namespace: "resource-v5", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, minBuildVersion: minBuildVersion, minBuildTime: minBuildTimeDashboard}, + {NamespacedResource: NamespacedResource{Namespace: "resource-2h-v5", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, minBuildVersion: minBuildVersion, minBuildTime: minBuildTimeDashboard}, + {NamespacedResource: NamespacedResource{Namespace: "resource-2h-v6", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}, minBuildVersion: minBuildVersion, minBuildTime: minBuildTimeDashboard}, + }) +} + +func TestRebuildIndexes(t *testing.T) { + storage := &mockStorageBackend{} + + now := time.Now() + + search := &mockSearchBackend{ + cache: map[NamespacedResource]ResourceIndex{ + {Namespace: "idx1", Group: "group", Resource: "res"}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildVersion: semver.MustParse("5.0.0")}, + }, + + {Namespace: "idx2", Group: "group", Resource: "res"}: &MockResourceIndex{ + buildInfo: IndexBuildInfo{BuildTime: now.Add(-2 * time.Hour)}, + }, + + {Namespace: "idx3", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE}: &MockResourceIndex{}, + }, + } + + supplier := 
&TestDocumentBuilderSupplier{ + GroupsResources: map[string]string{ + "group": "resource", + }, + } + + opts := SearchOptions{ + Backend: search, + Resources: supplier, + } + + support, err := newSearchSupport(opts, storage, nil, nil, noop.NewTracerProvider().Tracer("test"), nil, nil) + require.NoError(t, err) + require.NotNil(t, support) + + // Note: we can only rebuild each index once, after that it "loses" it's build info. + + t.Run("Don't rebuild if min build version is old", func(t *testing.T) { + checkRebuildIndex(t, support, rebuildRequest{ + NamespacedResource: NamespacedResource{Namespace: "idx1", Group: "group", Resource: "res"}, + minBuildVersion: semver.MustParse("4.5"), + }, true, false) + }) + + t.Run("Rebuild if min build version is more recent", func(t *testing.T) { + checkRebuildIndex(t, support, rebuildRequest{ + NamespacedResource: NamespacedResource{Namespace: "idx1", Group: "group", Resource: "res"}, + minBuildVersion: semver.MustParse("5.5.5"), + }, true, true) + }) + + t.Run("Don't rebuild if min build time is very old", func(t *testing.T) { + checkRebuildIndex(t, support, rebuildRequest{ + NamespacedResource: NamespacedResource{Namespace: "idx2", Group: "group", Resource: "res"}, + minBuildTime: now.Add(-5 * time.Hour), + }, true, false) + }) + + t.Run("Rebuild if min build time is more recent", func(t *testing.T) { + checkRebuildIndex(t, support, rebuildRequest{ + NamespacedResource: NamespacedResource{Namespace: "idx2", Group: "group", Resource: "res"}, + minBuildTime: now.Add(-1 * time.Hour), + }, true, true) + }) + + t.Run("Don't rebuild if index doesn't exist.", func(t *testing.T) { + checkRebuildIndex(t, support, rebuildRequest{ + NamespacedResource: NamespacedResource{Namespace: "unknown", Group: "group", Resource: "res"}, + minBuildTime: now.Add(-5 * time.Hour), + }, false, true) + }) + + t.Run("Rebuild dashboard index (it has no build info), verify that builders cache was emptied.", func(t *testing.T) { + dashKey := 
NamespacedResource{Namespace: "idx3", Group: "group", Resource: dashboardv1.DASHBOARD_RESOURCE} + + support.builders.ns.Add(dashKey, &MockDocumentBuilder{}) + _, ok := support.builders.ns.Get(dashKey) + require.True(t, ok) + + checkRebuildIndex(t, support, rebuildRequest{ + NamespacedResource: dashKey, + minBuildTime: now, + }, true, true) + + // Verify that builders cache was emptied. + _, ok = support.builders.ns.Get(dashKey) + require.False(t, ok) + }) +} + +func checkRebuildIndex(t *testing.T, support *searchSupport, req rebuildRequest, indexExists, expectedRebuild bool) { + ctx := context.Background() + + idxBefore := support.search.GetIndex(req.NamespacedResource) + if indexExists { + require.NotNil(t, idxBefore, "index should exist before rebuildIndex") + } else { + require.Nil(t, idxBefore, "index should not exist before rebuildIndex") + } + + support.rebuildIndex(ctx, req) + + idxAfter := support.search.GetIndex(req.NamespacedResource) + + if indexExists { + require.NotNil(t, idxAfter, "index should exist after rebuildIndex") + if expectedRebuild { + require.NotSame(t, idxBefore, idxAfter, "index should be rebuilt") + } else { + require.Same(t, idxBefore, idxAfter, "index should not be rebuilt") + } + } else { + require.Nil(t, idxAfter, "index should not exist after rebuildIndex") + } +} diff --git a/pkg/storage/unified/resource/server.go b/pkg/storage/unified/resource/server.go index 451c731f486..bedea3987c2 100644 --- a/pkg/storage/unified/resource/server.go +++ b/pkg/storage/unified/resource/server.go @@ -11,6 +11,7 @@ import ( "sync/atomic" "time" + "github.com/Masterminds/semver" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" @@ -21,9 +22,9 @@ import ( claims "github.com/grafana/authlib/types" "github.com/grafana/dskit/backoff" - "github.com/grafana/dskit/ring" "github.com/grafana/grafana/pkg/apimachinery/utils" + "github.com/grafana/grafana/pkg/apimachinery/validation" secrets 
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts" "github.com/grafana/grafana/pkg/storage/unified/resourcepb" "github.com/grafana/grafana/pkg/util/scheduler" @@ -178,15 +179,26 @@ type SearchOptions struct { Resources DocumentBuilderSupplier // How many threads should build indexes - WorkerThreads int + InitWorkerThreads int // Skip building index on startup for small indexes InitMinCount int - // Interval for periodic index rebuilds (0 disables periodic rebuilds) - RebuildInterval time.Duration + // How often to rebuild dashboard index. 0 disables periodic rebuilds. + DashboardIndexMaxAge time.Duration - Ring *ring.Ring + // Maximum age of file-based index that can be reused. Ignored if zero. + MaxIndexAge time.Duration + + // Minimum build version for reusing file-based indexes. Ignored if nil. + MinBuildVersion *semver.Version + + // Number of workers to use for index rebuilds. + IndexRebuildWorkers int + + // Minimum time between index updates. This is also used as a delay after a successful write operation, to guarantee + // that subsequent search will observe the effect of the writing. + IndexMinUpdateInterval time.Duration } type ResourceServerOptions struct { @@ -329,6 +341,8 @@ func NewResourceServer(opts ResourceServerOptions) (*server, error) { reg: opts.Reg, queue: opts.QOSQueue, queueConfig: opts.QOSConfig, + + artificialSuccessfulWriteDelay: opts.Search.IndexMinUpdateInterval, } if opts.Search.Resources != nil { @@ -379,6 +393,11 @@ type server struct { reg prometheus.Registerer queue QOSEnqueuer queueConfig QueueConfig + + // This value is used by storage server to artificially delay returning response after successful + // write operations to make sure that subsequent search by the same client will return up-to-date results. + // Set from SearchOptions.IndexMinUpdateInterval. + artificialSuccessfulWriteDelay time.Duration } // Init implements ResourceServer. 
@@ -421,6 +440,10 @@ func (s *server) Stop(ctx context.Context) error { } } + if s.search != nil { + s.search.stop() + } + // Stops the streaming s.cancel() @@ -523,8 +546,8 @@ func (s *server) newEvent(ctx context.Context, user claims.AuthInfo, key *resour return nil, NewBadRequestError( fmt.Sprintf("key/name do not match (key: %s, name: %s)", key.Name, obj.GetName())) } - if err := validateName(obj.GetName()); err != nil { - return nil, err + if errs := validation.IsValidGrafanaName(obj.GetName()); err != nil { + return nil, NewBadRequestError(errs[0]) } // For folder moves, we need to check permissions on both folders @@ -546,8 +569,7 @@ func (s *server) newEvent(ctx context.Context, user claims.AuthInfo, key *resour check.Name = key.Name } - check.Folder = obj.GetFolder() - a, err := s.access.Check(ctx, user, check) + a, err := s.access.Check(ctx, user, check, obj.GetFolder()) if err != nil { return nil, AsErrorResult(err) } @@ -584,10 +606,9 @@ func (s *server) checkFolderMovePermissions(ctx context.Context, user claims.Aut Resource: key.Resource, Namespace: key.Namespace, Name: key.Name, - Folder: oldFolder, } - a, err := s.access.Check(ctx, user, updateCheck) + a, err := s.access.Check(ctx, user, updateCheck, oldFolder) if err != nil { return AsErrorResult(err) } @@ -604,10 +625,10 @@ func (s *server) checkFolderMovePermissions(ctx context.Context, user claims.Aut Group: key.Group, Resource: key.Resource, Namespace: key.Namespace, - Folder: newFolder, + Name: key.Name, } - a, err = s.access.Check(ctx, user, createCheck) + a, err = s.access.Check(ctx, user, createCheck, newFolder) if err != nil { return AsErrorResult(err) } @@ -652,6 +673,8 @@ func (s *server) Create(ctx context.Context, req *resourcepb.CreateRequest) (*re }) } + s.sleepAfterSuccessfulWriteOperation(res, err) + return res, err } @@ -675,6 +698,37 @@ func (s *server) create(ctx context.Context, user claims.AuthInfo, req *resource return rsp, nil } +type responseWithErrorResult interface { + 
GetError() *resourcepb.ErrorResult +} + +// sleepAfterSuccessfulWriteOperation will sleep for a specified time if the operation was successful. +// Returns boolean indicating whether the sleep was performed or not (used in testing). +// +// This sleep is performed to guarantee search-after-write consistency, when rate-limiting updates to search index. +func (s *server) sleepAfterSuccessfulWriteOperation(res responseWithErrorResult, err error) bool { + if s.artificialSuccessfulWriteDelay <= 0 { + return false + } + + if err != nil { + // No sleep necessary if operation failed. + return false + } + + // We expect that non-nil interface values with typed nils can still handle GetError() call. + if res != nil { + errRes := res.GetError() + if errRes != nil { + // No sleep necessary if operation failed. + return false + } + } + + time.Sleep(s.artificialSuccessfulWriteDelay) + return true +} + func (s *server) Update(ctx context.Context, req *resourcepb.UpdateRequest) (*resourcepb.UpdateResponse, error) { ctx, span := s.tracer.Start(ctx, "storage_server.Update") defer span.End() @@ -706,6 +760,8 @@ func (s *server) Update(ctx context.Context, req *resourcepb.UpdateRequest) (*re }) } + s.sleepAfterSuccessfulWriteOperation(res, err) + return res, err } @@ -722,8 +778,12 @@ func (s *server) update(ctx context.Context, user claims.AuthInfo, req *resource return rsp, nil } + // TODO: once we know the client is always sending the RV, require ResourceVersion > 0 + // See: https://github.com/grafana/grafana/pull/111866 if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion { - return nil, ErrOptimisticLockingFailed + return &resourcepb.UpdateResponse{ + Error: &ErrOptimisticLockingFailed, + }, nil } event, e := s.newEvent(ctx, user, req.Key, req.Value, latest.Value) @@ -774,6 +834,8 @@ func (s *server) Delete(ctx context.Context, req *resourcepb.DeleteRequest) (*re }) } + s.sleepAfterSuccessfulWriteOperation(res, err) + return res, err } @@ -787,7 +849,7 @@ 
func (s *server) delete(ctx context.Context, user claims.AuthInfo, req *resource return rsp, nil } if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion { - rsp.Error = AsErrorResult(ErrOptimisticLockingFailed) + rsp.Error = &ErrOptimisticLockingFailed return rsp, nil } @@ -797,8 +859,7 @@ func (s *server) delete(ctx context.Context, user claims.AuthInfo, req *resource Resource: req.Key.Resource, Namespace: req.Key.Namespace, Name: req.Key.Name, - Folder: latest.Folder, - }) + }, latest.Folder) if err != nil { rsp.Error = AsErrorResult(err) return rsp, nil @@ -864,10 +925,6 @@ func (s *server) Read(ctx context.Context, req *resourcepb.ReadRequest) (*resour }}, nil } - // if req.Key.Group == "" { - // status, _ := AsErrorResult(apierrors.NewBadRequest("missing group")) - // return &ReadResponse{Status: status}, nil - // } if req.Key.Resource == "" { return &resourcepb.ReadResponse{Error: NewBadRequestError("missing resource")}, nil } @@ -900,8 +957,7 @@ func (s *server) read(ctx context.Context, user claims.AuthInfo, req *resourcepb Resource: req.Key.Resource, Namespace: req.Key.Namespace, Name: req.Key.Name, - Folder: rsp.Folder, - }) + }, rsp.Folder) if err != nil { return &resourcepb.ReadResponse{Error: AsErrorResult(err)}, nil } diff --git a/pkg/storage/unified/resource/server_test.go b/pkg/storage/unified/resource/server_test.go index 60f62b4df22..66536b4d5d8 100644 --- a/pkg/storage/unified/resource/server_test.go +++ b/pkg/storage/unified/resource/server_test.go @@ -3,6 +3,7 @@ package resource import ( "context" "encoding/json" + "errors" "fmt" "log/slog" "net/http" @@ -21,6 +22,7 @@ import ( authlib "github.com/grafana/authlib/types" "github.com/grafana/dskit/services" + "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/infra/log" @@ -365,7 +367,7 @@ func TestSimpleServer(t *testing.T) { invalidQualifiedNames := []string{ "", // empty - 
strings.Repeat("1", MaxNameLength+1), // too long + strings.Repeat("1", 260), // too long " ", // only spaces "f8cc010c.ee72.4681;89d2+d46e1bd47d33", // invalid chars } @@ -477,11 +479,12 @@ func TestSimpleServer(t *testing.T) { ResourceVersion: created.ResourceVersion}) require.NoError(t, err) - _, err = server.Update(ctx, &resourcepb.UpdateRequest{ + rsp, _ := server.Update(ctx, &resourcepb.UpdateRequest{ Key: key, Value: raw, ResourceVersion: created.ResourceVersion}) - require.ErrorIs(t, err, ErrOptimisticLockingFailed) + require.Equal(t, rsp.Error.Code, ErrOptimisticLockingFailed.Code) + require.Equal(t, rsp.Error.Message, ErrOptimisticLockingFailed.Message) }) } @@ -586,3 +589,30 @@ func newTestServerWithQueue(t *testing.T, maxSizePerTenant int, numWorkers int) } return s, q } + +func TestArtificialDelayAfterSuccessfulOperation(t *testing.T) { + s := &server{artificialSuccessfulWriteDelay: 1 * time.Millisecond} + + check := func(t *testing.T, expectedSleep bool, res responseWithErrorResult, err error) { + slept := s.sleepAfterSuccessfulWriteOperation(res, err) + require.Equal(t, expectedSleep, slept) + } + + // Successful responses should sleep + check(t, true, nil, nil) + + check(t, true, (responseWithErrorResult)((*resourcepb.CreateResponse)(nil)), nil) + check(t, true, &resourcepb.CreateResponse{}, nil) + + check(t, true, (responseWithErrorResult)((*resourcepb.UpdateResponse)(nil)), nil) + check(t, true, &resourcepb.UpdateResponse{}, nil) + + check(t, true, (responseWithErrorResult)((*resourcepb.DeleteResponse)(nil)), nil) + check(t, true, &resourcepb.DeleteResponse{}, nil) + + // Failed responses should return without sleeping + check(t, false, nil, errors.New("some error")) + check(t, false, &resourcepb.CreateResponse{Error: AsErrorResult(errors.New("some error"))}, nil) + check(t, false, &resourcepb.UpdateResponse{Error: AsErrorResult(errors.New("some error"))}, nil) + check(t, false, &resourcepb.DeleteResponse{Error: AsErrorResult(errors.New("some 
error"))}, nil) +} diff --git a/pkg/storage/unified/resource/validation.go b/pkg/storage/unified/resource/validation.go deleted file mode 100644 index 1ba74263cd9..00000000000 --- a/pkg/storage/unified/resource/validation.go +++ /dev/null @@ -1,85 +0,0 @@ -package resource - -import ( - "fmt" - "regexp" - - "github.com/grafana/grafana/pkg/storage/unified/resourcepb" - "k8s.io/apimachinery/pkg/util/validation" -) - -const MaxNameLength = 253 -const MaxNamespaceLength = 40 -const MaxGroupLength = 60 -const MaxResourceLength = 40 - -var validNameCharPattern = `a-zA-Z0-9:\-\_\.` -var validNamePattern = regexp.MustCompile(`^[` + validNameCharPattern + `]*$`).MatchString - -func validateName(name string) *resourcepb.ErrorResult { - if len(name) == 0 { - return NewBadRequestError("name is too short") - } - if len(name) > MaxNameLength { - return NewBadRequestError("name is too long") - } - if !validNamePattern(name) { - return NewBadRequestError("name includes invalid characters") - } - // In standard k8s, it must not start with a number - // however that would force us to update many many many existing resources - // so we will be slightly more lenient than standard k8s - return nil -} - -func validateNamespace(value string) *resourcepb.ErrorResult { - if len(value) == 0 { - // empty namespace is allowed (means cluster-scoped) - return nil - } - - if len(value) > MaxNamespaceLength { - return NewBadRequestError("value is too long") - } - - err := validation.IsQualifiedName(value) - if len(err) > 0 { - return NewBadRequestError(fmt.Sprintf("name is not a valid qualified name: %+v", err)) - } - - return nil -} - -func validateGroup(value string) *resourcepb.ErrorResult { - if len(value) == 0 { - return NewBadRequestError("value is too short") - } - - if len(value) > MaxGroupLength { - return NewBadRequestError("value is too long") - } - - err := validation.IsQualifiedName(value) - if len(err) > 0 { - return NewBadRequestError(fmt.Sprintf("name is not a valid qualified 
name: %+v", err)) - } - - return nil -} - -func validateResource(value string) *resourcepb.ErrorResult { - if len(value) == 0 { - return NewBadRequestError("value is too short") - } - - if len(value) > MaxResourceLength { - return NewBadRequestError("value is too long") - } - - err := validation.IsQualifiedName(value) - if len(err) > 0 { - return NewBadRequestError(fmt.Sprintf("name is not a valid qualified name: %+v", err)) - } - - return nil -} diff --git a/pkg/storage/unified/resource/validation_test.go b/pkg/storage/unified/resource/validation_test.go deleted file mode 100644 index bc0e4bd8bda..00000000000 --- a/pkg/storage/unified/resource/validation_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package resource - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNameValidation(t *testing.T) { - require.NotNil(t, validateName("")) // too short - require.NotNil(t, validateName(strings.Repeat("0", 254))) // too long (max 253) - - // OK - require.Nil(t, validateName("a")) - require.Nil(t, validateName("hello-world")) - require.Nil(t, validateName("hello.world")) - require.Nil(t, validateName("hello_world")) - require.Nil(t, validateName("hello:world")) - - // Bad characters - require.NotNil(t, validateName("hello world")) - require.NotNil(t, validateName("hello!")) - require.NotNil(t, validateName("hello~")) - require.NotNil(t, validateName("hello ")) - require.NotNil(t, validateName("hello*")) - require.NotNil(t, validateName("hello+")) - require.NotNil(t, validateName("hello=")) - require.NotNil(t, validateName("hello%")) -} diff --git a/pkg/storage/unified/search/bleve.go b/pkg/storage/unified/search/bleve.go index ef06f585363..190322bab7c 100644 --- a/pkg/storage/unified/search/bleve.go +++ b/pkg/storage/unified/search/bleve.go @@ -52,7 +52,7 @@ const ( // Keys used to store internal data in index. 
const ( internalRVKey = "rv" // Encoded as big-endian int64 - internalBuildInfoKey = "build_info" // Encoded as JSON of IndexBuildInfo struct + internalBuildInfoKey = "build_info" // Encoded as JSON of buildInfo struct ) var _ resource.SearchBackend = &bleveBackend{} @@ -75,13 +75,13 @@ type BleveOptions struct { BuildVersion string - MaxFileIndexAge time.Duration // Maximum age of file-based index that can be reused. Ignored if zero. - MinBuildVersion *semver.Version // Minimum build version for reusing file-based indexes. Ignored if nil. - Logger *slog.Logger UseFullNgram bool + // Minimum time between index updates. + IndexMinUpdateInterval time.Duration + // This function is called to check whether the index is owned by the current instance. // Indexes that are not owned by current instance are eligible for cleanup. // If nil, all indexes are owned by the current instance. @@ -170,13 +170,24 @@ func NewBleveBackend(opts BleveOptions, tracer trace.Tracer, indexMetrics *resou } // GetIndex will return nil if the key does not exist -func (b *bleveBackend) GetIndex(_ context.Context, key resource.NamespacedResource) (resource.ResourceIndex, error) { +func (b *bleveBackend) GetIndex(key resource.NamespacedResource) resource.ResourceIndex { idx := b.getCachedIndex(key, time.Now()) // Avoid returning typed nils. 
if idx == nil { - return nil, nil + return nil } - return idx, nil + return idx +} + +func (b *bleveBackend) GetOpenIndexes() []resource.NamespacedResource { + b.cacheMx.RLock() + defer b.cacheMx.RUnlock() + + result := make([]resource.NamespacedResource, 0, len(b.cache)) + for key := range b.cache { + result = append(result, key) + } + return result } func (b *bleveBackend) getCachedIndex(key resource.NamespacedResource, now time.Time) *bleveIndex { @@ -318,7 +329,7 @@ func newBleveIndex(path string, mapper mapping.IndexMapping, buildTime time.Time return nil, err } - bi := IndexBuildInfo{ + bi := buildInfo{ BuildTime: buildTime.Unix(), BuildVersion: buildVersion, } @@ -336,29 +347,11 @@ func newBleveIndex(path string, mapper mapping.IndexMapping, buildTime time.Time return ix, nil } -type IndexBuildInfo struct { +type buildInfo struct { BuildTime int64 `json:"build_time"` // Unix seconds timestamp of time when the index was built BuildVersion string `json:"build_version"` // Grafana version used when building the index } -func (bi IndexBuildInfo) GetBuildTime() time.Time { - if bi.BuildTime == 0 { - return time.Time{} - } - return time.Unix(bi.BuildTime, 0) -} - -func (bi IndexBuildInfo) GetBuildVersion() *semver.Version { - if bi.BuildVersion == "" { - return nil - } - v, err := semver.NewVersion(bi.BuildVersion) - if err != nil { - return nil - } - return v -} - // BuildIndex builds an index from scratch or retrieves it from the filesystem. // If built successfully, the new index replaces the old index in the cache (if there was any). // Existing index in the file system is reused, if it exists, and if size indicates that we should use file-based index, and rebuild is not true. @@ -435,11 +428,7 @@ func (b *bleveBackend) BuildIndex( // This happens on startup, or when memory-based index has expired. (We don't expire file-based indexes) // If we do have an unexpired cached index already, we always build a new index from scratch. 
if cachedIndex == nil && !rebuild { - minBuildTime := time.Time{} - if b.opts.MaxFileIndexAge > 0 { - minBuildTime = time.Now().Add(-b.opts.MaxFileIndexAge) - } - index, fileIndexName, indexRV = b.findPreviousFileBasedIndex(resourceDir, minBuildTime, b.opts.MinBuildVersion) + index, fileIndexName, indexRV = b.findPreviousFileBasedIndex(resourceDir) } if index != nil { @@ -621,18 +610,6 @@ func isPathWithinRoot(path, absoluteRoot string) bool { return true } -// cacheKeys returns list of keys for indexes in the cache (including possibly expired ones). -func (b *bleveBackend) cacheKeys() []resource.NamespacedResource { - b.cacheMx.RLock() - defer b.cacheMx.RUnlock() - - keys := make([]resource.NamespacedResource, 0, len(b.cache)) - for k := range b.cache { - keys = append(keys, k) - } - return keys -} - // TotalDocs returns the total number of documents across all indices func (b *bleveBackend) TotalDocs() int64 { var totalDocs int64 @@ -640,7 +617,7 @@ func (b *bleveBackend) TotalDocs() int64 { // We do this to avoid keeping a lock for the entire TotalDocs function, since DocCount may be slow (due to disk access). 
now := time.Now() - for _, key := range b.cacheKeys() { + for _, key := range b.GetOpenIndexes() { idx := b.getCachedIndex(key, now) if idx == nil { continue @@ -658,7 +635,7 @@ func formatIndexName(now time.Time) string { return now.Format("20060102-150405") } -func (b *bleveBackend) findPreviousFileBasedIndex(resourceDir string, minBuildTime time.Time, minBuildVersion *semver.Version) (bleve.Index, string, int64) { +func (b *bleveBackend) findPreviousFileBasedIndex(resourceDir string) (bleve.Index, string, int64) { entries, err := os.ReadDir(resourceDir) if err != nil { return nil, "", 0 @@ -684,31 +661,6 @@ func (b *bleveBackend) findPreviousFileBasedIndex(resourceDir string, minBuildTi continue } - buildInfo, err := getBuildInfo(idx) - if err != nil { - b.log.Error("error getting build info from index", "indexDir", indexDir, "err", err) - _ = idx.Close() - continue - } - - if !minBuildTime.IsZero() { - bt := buildInfo.GetBuildTime() - if bt.IsZero() || bt.Before(minBuildTime) { - b.log.Debug("index build time is before minBuildTime, not reusing the index", "indexDir", indexDir, "indexBuildTime", bt, "minBuildTime", minBuildTime) - _ = idx.Close() - continue - } - } - - if minBuildVersion != nil { - bv := buildInfo.GetBuildVersion() - if bv == nil || bv.Compare(minBuildVersion) < 0 { - b.log.Debug("index build version is before minBuildVersion, not reusing the index", "indexDir", indexDir, "indexBuildVersion", bv, "minBuildVersion", minBuildVersion) - _ = idx.Close() - continue - } - } - return idx, indexName, indexRV } @@ -740,8 +692,8 @@ func (b *bleveBackend) closeAllIndexes() { } type updateRequest struct { - reason string - callback chan updateResult + requestTime time.Time + callback chan updateResult } type updateResult struct { @@ -756,6 +708,10 @@ type bleveIndex struct { // RV returned by last List/ListModifiedSince operation. Updated when updating index. resourceVersion int64 + // Timestamp when the last update to the index was done (started). 
+ // Subsequent update requests only trigger new update if minUpdateInterval has elapsed. + nextUpdateTime time.Time + standard resource.SearchableDocumentFields fields resource.SearchableDocumentFields @@ -770,7 +726,8 @@ type bleveIndex struct { tracing trace.Tracer logger *slog.Logger - updaterFn resource.UpdateFn + updaterFn resource.UpdateFn + minUpdateInterval time.Duration updaterMu sync.Mutex updaterCond *sync.Cond // Used to signal the updater goroutine that there is work to do, or updater is no longer enabled and should stop. Also used by updater itself to stop early if there's no work to be done. @@ -797,15 +754,16 @@ func (b *bleveBackend) newBleveIndex( logger *slog.Logger, ) *bleveIndex { bi := &bleveIndex{ - key: key, - index: index, - indexStorage: newIndexType, - fields: fields, - allFields: allFields, - standard: standardSearchFields, - tracing: b.tracer, - logger: logger, - updaterFn: updaterFn, + key: key, + index: index, + indexStorage: newIndexType, + fields: fields, + allFields: allFields, + standard: standardSearchFields, + tracing: b.tracer, + logger: logger, + updaterFn: updaterFn, + minUpdateInterval: b.opts.IndexMinUpdateInterval, } bi.updaterCond = sync.NewCond(&bi.updaterMu) if b.indexMetrics != nil { @@ -878,21 +836,46 @@ func getRV(index bleve.Index) (int64, error) { return int64(binary.BigEndian.Uint64(raw)), nil } -func getBuildInfo(index bleve.Index) (IndexBuildInfo, error) { +func getBuildInfo(index bleve.Index) (buildInfo, error) { raw, err := index.GetInternal([]byte(internalBuildInfoKey)) if err != nil { - return IndexBuildInfo{}, err + return buildInfo{}, err } if len(raw) == 0 { - return IndexBuildInfo{}, nil + return buildInfo{}, nil } - res := IndexBuildInfo{} + res := buildInfo{} err = json.Unmarshal(raw, &res) return res, err } +func (b *bleveIndex) BuildInfo() (resource.IndexBuildInfo, error) { + bi, err := getBuildInfo(b.index) + if err != nil { + return resource.IndexBuildInfo{}, err + } + + bt := time.Time{} + if 
bi.BuildTime > 0 { + bt = time.Unix(bi.BuildTime, 0) + } + + var bv *semver.Version + if bi.BuildVersion != "" { + v, err := semver.NewVersion(bi.BuildVersion) + if err == nil { + bv = v + } + } + + return resource.IndexBuildInfo{ + BuildTime: bt, + BuildVersion: bv, + }, nil +} + func (b *bleveIndex) ListManagedObjects(ctx context.Context, req *resourcepb.ListManagedObjectsRequest) (*resourcepb.ListManagedObjectsResponse, error) { if req.NextPageToken != "" { return nil, fmt.Errorf("next page not implemented yet") @@ -1244,16 +1227,19 @@ func (b *bleveIndex) toBleveSearchRequest(ctx context.Context, req *resourcepb.R // Query 1: Match the exact query string queryExact := bleve.NewMatchQuery(req.Query) queryExact.SetBoost(10.0) + queryExact.SetField(resource.SEARCH_FIELD_TITLE) queryExact.Analyzer = keyword.Name // don't analyze the query input - treat it as a single token queryExact.Operator = query.MatchQueryOperatorAnd // This doesn't make a difference for keyword analyzer, we add it just to be explicit. // Query 2: Phrase query with standard analyzer queryPhrase := bleve.NewMatchPhraseQuery(req.Query) queryPhrase.SetBoost(5.0) + queryPhrase.SetField(resource.SEARCH_FIELD_TITLE) queryPhrase.Analyzer = standard.Name // Query 3: Match query with standard analyzer queryAnalyzed := bleve.NewMatchQuery(removeSmallTerms(req.Query)) + queryAnalyzed.SetField(resource.SEARCH_FIELD_TITLE) queryAnalyzed.Analyzer = standard.Name queryAnalyzed.Operator = query.MatchQueryOperatorAnd // Make sure all terms from the query are matched @@ -1372,14 +1358,14 @@ func (b *bleveIndex) stopUpdaterAndCloseIndex() error { return b.index.Close() } -func (b *bleveIndex) UpdateIndex(ctx context.Context, reason string) (int64, error) { +func (b *bleveIndex) UpdateIndex(ctx context.Context) (int64, error) { // We don't have to do anything if the index cannot be updated (typically in tests). 
if b.updaterFn == nil { return 0, nil } // Use chan with buffer size 1 to ensure that we can always send the result back, even if there's no reader anymore. - req := updateRequest{reason: reason, callback: make(chan updateResult, 1)} + req := updateRequest{requestTime: time.Now(), callback: make(chan updateResult, 1)} // Make sure that the updater goroutine is running. b.updaterMu.Lock() @@ -1436,7 +1422,7 @@ func (b *bleveIndex) runUpdater(ctx context.Context) { b.updaterMu.Lock() for !b.updaterShutdown && ctx.Err() == nil && len(b.updaterQueue) == 0 && time.Since(start) < maxWait { - // Cond is signalled when updaterShutdown changes, updaterQueue gets new element or when timeout occurs. + // Cond is signaled when updaterShutdown changes, updaterQueue gets new element or when timeout occurs. b.updaterCond.Wait() } @@ -1459,6 +1445,26 @@ func (b *bleveIndex) runUpdater(ctx context.Context) { return } + // Check if requests arrived before minUpdateInterval since the last update has elapsed, and remove such requests. + for ix := 0; ix < len(batch); { + req := batch[ix] + if req.requestTime.Before(b.nextUpdateTime) { + req.callback <- updateResult{rv: b.resourceVersion} + batch = append(batch[:ix], batch[ix+1:]...) + } else { + // Keep in the batch + ix++ + } + } + + // If all requests are now handled, don't perform update. 
+ if len(batch) == 0 { + continue + } + + // Bump next update time + b.nextUpdateTime = time.Now().Add(b.minUpdateInterval) + var rv int64 var err = ctx.Err() if err == nil { diff --git a/pkg/storage/unified/search/bleve_test.go b/pkg/storage/unified/search/bleve_test.go index 347195786a5..c0ad9c68fc0 100644 --- a/pkg/storage/unified/search/bleve_test.go +++ b/pkg/storage/unified/search/bleve_test.go @@ -14,9 +14,7 @@ import ( "testing" "time" - "github.com/Masterminds/semver" "github.com/blevesearch/bleve/v2" - authlib "github.com/grafana/authlib/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" @@ -24,6 +22,8 @@ import ( "go.uber.org/atomic" "go.uber.org/goleak" + authlib "github.com/grafana/authlib/types" + "github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/infra/log/logtest" @@ -653,7 +653,7 @@ type StubAccessClient struct { resourceResponses map[string]bool // key is the resource name, and bool if what the checker will return } -func (nc *StubAccessClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest) (authlib.CheckResponse, error) { +func (nc *StubAccessClient) Check(ctx context.Context, id authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) { return authlib.CheckResponse{Allowed: nc.resourceResponses[req.Resource]}, nil } @@ -827,30 +827,18 @@ func withRootDir(root string) setupOption { } } -func withBuildVersion(version string) setupOption { - return func(options *BleveOptions) { - options.BuildVersion = version - } -} - -func withMinBuildVersion(version *semver.Version) setupOption { - return func(options *BleveOptions) { - options.MinBuildVersion = version - } -} - -func withMaxFileIndexAge(maxAge time.Duration) setupOption { - return func(options *BleveOptions) { - options.MaxFileIndexAge = maxAge - } -} 
- func withOwnsIndexFn(fn func(key resource.NamespacedResource) (bool, error)) setupOption { return func(options *BleveOptions) { options.OwnsIndex = fn } } +func withIndexMinUpdateInterval(d time.Duration) setupOption { + return func(options *BleveOptions) { + options.IndexMinUpdateInterval = d + } +} + func TestBuildIndexExpiration(t *testing.T) { ns := resource.NamespacedResource{ Namespace: "test", @@ -915,8 +903,7 @@ func TestBuildIndexExpiration(t *testing.T) { backend.runEvictExpiredOrUnownedIndexes(time.Now().Add(5 * time.Minute)) if tc.expectedEviction { - idx, err := backend.GetIndex(context.Background(), ns) - require.NoError(t, err) + idx := backend.GetIndex(ns) require.Nil(t, idx) _, err = builtIndex.DocCount(context.Background(), "") @@ -925,8 +912,7 @@ func TestBuildIndexExpiration(t *testing.T) { // Verify that there are no open indexes. checkOpenIndexes(t, reg, 0, 0) } else { - idx, err := backend.GetIndex(context.Background(), ns) - require.NoError(t, err) + idx := backend.GetIndex(ns) require.NotNil(t, idx) cnt, err := builtIndex.DocCount(context.Background(), "") @@ -978,81 +964,39 @@ func TestBuildIndex(t *testing.T) { Resource: "resource", } - const alwaysRebuildDueToAge = 1 * time.Nanosecond - const neverRebuildDueToAge = 1 * time.Hour - for _, rebuild := range []bool{false, true} { - for _, version := range []string{"", "12.5.123"} { - for _, minBuildVersion := range []*semver.Version{nil, semver.MustParse("12.0.0"), semver.MustParse("13.0.0")} { - for _, maxIndexAge := range []time.Duration{0, alwaysRebuildDueToAge, neverRebuildDueToAge} { - shouldRebuild := rebuild - if minBuildVersion != nil { - shouldRebuild = shouldRebuild || version == "" || minBuildVersion.GreaterThan(semver.MustParse(version)) - } - if maxIndexAge > 0 { - shouldRebuild = shouldRebuild || maxIndexAge == alwaysRebuildDueToAge - } + testName := fmt.Sprintf("rebuild=%t", rebuild) - testName := "" - if shouldRebuild { - testName += "should REBUILD index" - } else { - 
testName += "should REUSE index" - } + t.Run(testName, func(t *testing.T) { + tmpDir := t.TempDir() - if rebuild { - testName += " when rebuild is true" - } else { - testName += " when rebuild is false" - } + const ( + firstIndexDocsCount = 10 + secondIndexDocsCount = 1000 + ) - if version != "" { - testName += " build version is " + version - } else { - testName += " build version is empty" - } - - if minBuildVersion != nil { - testName += " min build version is " + minBuildVersion.String() - } else { - testName += " min build version is nil" - } - - testName += " max index age is " + maxIndexAge.String() - - t.Run(testName, func(t *testing.T) { - tmpDir := t.TempDir() - - const ( - firstIndexDocsCount = 10 - secondIndexDocsCount = 1000 - ) - - { - backend, _ := setupBleveBackend(t, withFileThreshold(5), withRootDir(tmpDir), withBuildVersion(version)) - _, err := backend.BuildIndex(context.Background(), ns, firstIndexDocsCount, nil, "test", indexTestDocs(ns, firstIndexDocsCount, 100), nil, rebuild) - require.NoError(t, err) - backend.Stop() - } - - // Make sure we pass at least 1 nanosecond (alwaysRebuildDueToAge) to ensure that the index needs to be rebuild. 
- time.Sleep(1 * time.Millisecond) - - newBackend, _ := setupBleveBackend(t, withFileThreshold(5), withRootDir(tmpDir), withBuildVersion(version), withMinBuildVersion(minBuildVersion), withMaxFileIndexAge(maxIndexAge)) - idx, err := newBackend.BuildIndex(context.Background(), ns, secondIndexDocsCount, nil, "test", indexTestDocs(ns, secondIndexDocsCount, 100), nil, rebuild) - require.NoError(t, err) - - cnt, err := idx.DocCount(context.Background(), "") - require.NoError(t, err) - if shouldRebuild { - require.Equal(t, int64(secondIndexDocsCount), cnt, "Index has been not rebuilt") - } else { - require.Equal(t, int64(firstIndexDocsCount), cnt, "Index has not been reused") - } - }) - } + { + backend, _ := setupBleveBackend(t, withFileThreshold(5), withRootDir(tmpDir)) + _, err := backend.BuildIndex(context.Background(), ns, firstIndexDocsCount, nil, "test", indexTestDocs(ns, firstIndexDocsCount, 100), nil, rebuild) + require.NoError(t, err) + backend.Stop() } - } + + // Make sure we pass at least 1 nanosecond (alwaysRebuildDueToAge) to ensure that the index needs to be rebuild. 
+ time.Sleep(1 * time.Millisecond) + + newBackend, _ := setupBleveBackend(t, withFileThreshold(5), withRootDir(tmpDir)) + idx, err := newBackend.BuildIndex(context.Background(), ns, secondIndexDocsCount, nil, "test", indexTestDocs(ns, secondIndexDocsCount, 100), nil, rebuild) + require.NoError(t, err) + + cnt, err := idx.DocCount(context.Background(), "") + require.NoError(t, err) + if rebuild { + require.Equal(t, int64(secondIndexDocsCount), cnt, "Index has been not rebuilt") + } else { + require.Equal(t, int64(firstIndexDocsCount), cnt, "Index has not been reused") + } + }) } } @@ -1192,6 +1136,37 @@ func updateTestDocs(ns resource.NamespacedResource, docs int) resource.UpdateFn } } +func updateTestDocsReturningMillisTimestamp(ns resource.NamespacedResource, docs int) (resource.UpdateFn, *atomic.Int64) { + cnt := 0 + updateCalls := atomic.NewInt64(0) + + return func(context context.Context, index resource.ResourceIndex, sinceRV int64) (newRV int64, updatedDocs int, _ error) { + now := time.Now() + updateCalls.Inc() + + cnt++ + + var items []*resource.BulkIndexItem + for i := 0; i < docs; i++ { + items = append(items, &resource.BulkIndexItem{ + Action: resource.ActionIndex, + Doc: &resource.IndexableDocument{ + Key: &resourcepb.ResourceKey{ + Namespace: ns.Namespace, + Group: ns.Group, + Resource: ns.Resource, + Name: fmt.Sprintf("doc%d", i), + }, + Title: fmt.Sprintf("Document %d (gen_%d)", i, cnt), + }, + }) + } + + err := index.BulkIndex(&resource.BulkIndexRequest{Items: items}) + return now.UnixMilli(), docs, err + }, updateCalls +} + func TestCleanOldIndexes(t *testing.T) { dir := t.TempDir() @@ -1269,7 +1244,7 @@ func TestIndexUpdate(t *testing.T) { require.Equal(t, int64(0), resp.TotalHits) // Update index. - _, err = idx.UpdateIndex(context.Background(), "test") + _, err = idx.UpdateIndex(context.Background()) require.NoError(t, err) // Verify that index was updated -- number of docs didn't change, but we can search "gen_1" documents now. 
@@ -1277,7 +1252,7 @@ func TestIndexUpdate(t *testing.T) { require.Equal(t, int64(5), searchTitle(t, idx, "gen_1", 10, ns).TotalHits) // Update index again. - _, err = idx.UpdateIndex(context.Background(), "test") + _, err = idx.UpdateIndex(context.Background()) require.NoError(t, err) // Verify that index was updated again -- we can search "gen_2" now. "gen_1" documents are gone. require.Equal(t, 10, docCount(t, idx)) @@ -1321,13 +1296,13 @@ func TestConcurrentIndexUpdateAndBuildIndex(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, err = idx.UpdateIndex(ctx, "test") + _, err = idx.UpdateIndex(ctx) require.NoError(t, err) _, err = be.BuildIndex(t.Context(), ns, 10 /* file based */, nil, "test", indexTestDocs(ns, 10, 100), updaterFn, false) require.NoError(t, err) - _, err = idx.UpdateIndex(ctx, "test") + _, err = idx.UpdateIndex(ctx) require.Contains(t, err.Error(), bleve.ErrorIndexClosed.Error()) } @@ -1363,10 +1338,8 @@ func TestConcurrentIndexUpdateSearchAndRebuild(t *testing.T) { case <-time.After(time.Duration(i) * time.Millisecond): // introduce small jitter } - idx, err := be.GetIndex(ctx, ns) - require.NoError(t, err) // GetIndex doesn't really return error. - - _, err = idx.UpdateIndex(ctx, "test") + idx := be.GetIndex(ns) + _, err = idx.UpdateIndex(ctx) if err != nil { if errors.Is(err, bleve.ErrorIndexClosed) || errors.Is(err, context.Canceled) { continue @@ -1413,7 +1386,7 @@ func TestConcurrentIndexUpdateSearchAndRebuild(t *testing.T) { cancel() wg.Wait() - fmt.Println("Updates:", updates.Load(), "searches:", searches.Load(), "rebuilds:", rebuilds.Load()) + t.Log("Updates:", updates.Load(), "searches:", searches.Load(), "rebuilds:", rebuilds.Load()) } // Verify concurrent updates and searches work as expected. @@ -1447,7 +1420,7 @@ func TestConcurrentIndexUpdateAndSearch(t *testing.T) { prevRV := int64(0) for ctx.Err() == nil { // We use t.Context() here to avoid getting errors from context cancellation. 
- rv, err := idx.UpdateIndex(t.Context(), "test") + rv, err := idx.UpdateIndex(t.Context()) require.NoError(t, err) require.Greater(t, rv, prevRV) // Each update should return new RV (that's how our update function works) require.Equal(t, int64(10), searchTitle(t, idx, "Document", 10, ns).TotalHits) @@ -1475,7 +1448,72 @@ func TestConcurrentIndexUpdateAndSearch(t *testing.T) { require.Greater(t, rvUpdatedByMultipleGoroutines, int64(0)) } -// Verify concurrent updates and searches work as expected. +func TestConcurrentIndexUpdateAndSearchWithIndexMinUpdateInterval(t *testing.T) { + ns := resource.NamespacedResource{ + Namespace: "test", + Group: "group", + Resource: "resource", + } + + const minInterval = 100 * time.Millisecond + be, _ := setupBleveBackend(t, withIndexMinUpdateInterval(minInterval)) + + updateFn, updateCalls := updateTestDocsReturningMillisTimestamp(ns, 5) + idx, err := be.BuildIndex(t.Context(), ns, 10 /* file based */, nil, "test", indexTestDocs(ns, 10, 100), updateFn, false) + require.NoError(t, err) + + wg := sync.WaitGroup{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + attemptedUpdates := atomic.NewInt64(0) + + // Verify that each returned RV (unix timestamp in millis) is either the same as before, or at least minInterval later. + const searchConcurrency = 25 + for i := 0; i < searchConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + prevRV := int64(0) + for ctx.Err() == nil { + attemptedUpdates.Inc() + + // We use t.Context() here to avoid getting errors from context cancellation. + rv, err := idx.UpdateIndex(t.Context()) + require.NoError(t, err) + + // Our update function returns unix timestamp in millis. We expect it to not change at all, or change by minInterval. + if prevRV > 0 { + rvDiff := rv - prevRV + if rvDiff == 0 { + // OK + } else { + // Allow returned RV to be within 10% of minInterval. 
+ require.InDelta(t, minInterval.Milliseconds(), rvDiff, float64(minInterval.Milliseconds())*0.10) + } + } + + prevRV = rv + require.Equal(t, int64(10), searchTitle(t, idx, "Document", 10, ns).TotalHits) + } + }() + } + + // Run updates and searches for this time. + testTime := 1 * time.Second + + time.Sleep(testTime) + cancel() + wg.Wait() + + expectedUpdateCalls := int64(testTime / minInterval) + require.InDelta(t, expectedUpdateCalls, updateCalls.Load(), float64(expectedUpdateCalls/2)) + require.Greater(t, attemptedUpdates.Load(), updateCalls.Load()) + + t.Log("Attempted updates:", attemptedUpdates.Load(), "update calls:", updateCalls.Load()) +} + func TestIndexUpdateWithErrors(t *testing.T) { ns := resource.NamespacedResource{ Namespace: "test", @@ -1494,7 +1532,7 @@ func TestIndexUpdateWithErrors(t *testing.T) { require.NoError(t, err) t.Run("update fail", func(t *testing.T) { - _, err = idx.UpdateIndex(t.Context(), "test") + _, err = idx.UpdateIndex(t.Context()) require.ErrorIs(t, err, updateErr) }) @@ -1502,7 +1540,7 @@ func TestIndexUpdateWithErrors(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) defer cancel() - _, err = idx.UpdateIndex(ctx, "test") + _, err = idx.UpdateIndex(ctx) require.ErrorIs(t, err, context.DeadlineExceeded) }) @@ -1511,7 +1549,7 @@ func TestIndexUpdateWithErrors(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - _, err = idx.UpdateIndex(ctx, "test") + _, err = idx.UpdateIndex(ctx) require.ErrorIs(t, err, context.Canceled) }) } diff --git a/pkg/storage/unified/search/options.go b/pkg/storage/unified/search/options.go index a5bb1d52a8c..0d856b586c3 100644 --- a/pkg/storage/unified/search/options.go +++ b/pkg/storage/unified/search/options.go @@ -41,15 +41,14 @@ func NewSearchOptions( } bleve, err := NewBleveBackend(BleveOptions{ - Root: root, - FileThreshold: int64(cfg.IndexFileThreshold), // fewer than X items will use a memory index - BatchSize: 
cfg.IndexMaxBatchSize, // This is the batch size for how many objects to add to the index at once - IndexCacheTTL: cfg.IndexCacheTTL, // How long to keep the index cache in memory - BuildVersion: cfg.BuildVersion, - MaxFileIndexAge: cfg.MaxFileIndexAge, - MinBuildVersion: minVersion, - UseFullNgram: features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorageUseFullNgram), - OwnsIndex: ownsIndexFn, + Root: root, + FileThreshold: int64(cfg.IndexFileThreshold), // fewer than X items will use a memory index + BatchSize: cfg.IndexMaxBatchSize, // This is the batch size for how many objects to add to the index at once + IndexCacheTTL: cfg.IndexCacheTTL, // How long to keep the index cache in memory + BuildVersion: cfg.BuildVersion, + UseFullNgram: features.IsEnabledGlobally(featuremgmt.FlagUnifiedStorageUseFullNgram), + OwnsIndex: ownsIndexFn, + IndexMinUpdateInterval: cfg.IndexMinUpdateInterval, }, tracer, indexMetrics) if err != nil { @@ -57,11 +56,15 @@ func NewSearchOptions( } return resource.SearchOptions{ - Backend: bleve, - Resources: docs, - WorkerThreads: cfg.IndexWorkers, - InitMinCount: cfg.IndexMinCount, - RebuildInterval: cfg.IndexRebuildInterval, + Backend: bleve, + Resources: docs, + InitWorkerThreads: cfg.IndexWorkers, + IndexRebuildWorkers: cfg.IndexRebuildWorkers, + InitMinCount: cfg.IndexMinCount, + DashboardIndexMaxAge: cfg.IndexRebuildInterval, + MaxIndexAge: cfg.MaxFileIndexAge, + MinBuildVersion: minVersion, + IndexMinUpdateInterval: cfg.IndexMinUpdateInterval, }, nil } return resource.SearchOptions{}, nil diff --git a/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json b/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json index 5989ecb1804..a77725e2cc2 100644 --- a/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json +++ b/pkg/storage/unified/search/testdata/doc/dashboard-aaa-out.json @@ -26,7 +26,6 @@ "createdBy": "user:be2g71ke8yoe8b", "fields": { "ds_types": [ - "datasource", "my-custom-plugin" ], 
"errors_last_1_days": 1, @@ -47,12 +46,6 @@ "kind": "DataSource", "name": "DSUID" }, - { - "relation": "depends-on", - "group": "datasource", - "kind": "DataSource", - "name": "grafana" - }, { "relation": "depends-on", "group": "dashboards.grafana.app", diff --git a/pkg/storage/unified/sql/backend.go b/pkg/storage/unified/sql/backend.go index edbbe7760e0..1337bd7c2a9 100644 --- a/pkg/storage/unified/sql/backend.go +++ b/pkg/storage/unified/sql/backend.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/otel/trace/noop" "google.golang.org/protobuf/proto" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "github.com/grafana/grafana/pkg/util/sqlite" @@ -405,15 +406,18 @@ func (b *backend) update(ctx context.Context, event resource.WriteEvent) (int64, // Use rvManager.ExecWithRV instead of direct transaction rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) { // 1. Update resource - _, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ + res, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ SQLTemplate: sqltemplate.New(b.dialect), - WriteEvent: event, + WriteEvent: event, // includes the RV Folder: folder, GUID: event.GUID, }) if err != nil { return event.GUID, fmt.Errorf("resource update: %w", err) } + if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil { + return event.GUID, err + } // 2. Insert into resource history if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{ @@ -460,7 +464,7 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64, } rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) { // 1. 
delete from resource - _, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{ + res, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{ SQLTemplate: sqltemplate.New(b.dialect), WriteEvent: event, GUID: event.GUID, @@ -468,6 +472,9 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64, if err != nil { return event.GUID, fmt.Errorf("delete resource: %w", err) } + if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil { + return event.GUID, err + } // 2. Add event to resource history if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{ @@ -504,6 +511,28 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64, return rv, nil } +func (b *backend) checkConflict(res db.Result, key *resourcepb.ResourceKey, rv int64) error { + if rv == 0 { + return nil + } + + // The RV is part of the update request, and it may no longer be the most recent + rows, err := res.RowsAffected() + if err != nil { + return fmt.Errorf("unable to verify RV: %w", err) + } + if rows == 1 { + return nil // expected one result + } + if rows > 0 { + return fmt.Errorf("multiple rows effected (%d)", rows) + } + return apierrors.NewConflict(schema.GroupResource{ + Group: key.Group, + Resource: key.Resource, + }, key.Name, fmt.Errorf("resource version does not match current value")) +} + func (b *backend) ReadResource(ctx context.Context, req *resourcepb.ReadRequest) *resource.BackendReadResponse { _, span := b.tracer.Start(ctx, tracePrefix+".Read") defer span.End() diff --git a/pkg/storage/unified/sql/backend_test.go b/pkg/storage/unified/sql/backend_test.go index 534b393065e..65e64a76ac2 100644 --- a/pkg/storage/unified/sql/backend_test.go +++ b/pkg/storage/unified/sql/backend_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/DATA-DOG/go-sqlmock" - "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git 
a/pkg/storage/unified/sql/data/resource_delete.sql b/pkg/storage/unified/sql/data/resource_delete.sql index 5a11a97afaf..c621313ac3f 100644 --- a/pkg/storage/unified/sql/data/resource_delete.sql +++ b/pkg/storage/unified/sql/data/resource_delete.sql @@ -5,5 +5,6 @@ DELETE FROM {{ .Ident "resource" }} AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }} {{ if .WriteEvent.Key.Name }} AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }} + AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }} {{ end }} ; diff --git a/pkg/storage/unified/sql/data/resource_update.sql b/pkg/storage/unified/sql/data/resource_update.sql index 59ea5f4cdda..800de1a9fa6 100644 --- a/pkg/storage/unified/sql/data/resource_update.sql +++ b/pkg/storage/unified/sql/data/resource_update.sql @@ -10,4 +10,5 @@ UPDATE {{ .Ident "resource" }} AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }} AND {{ .Ident "namespace" }} = {{ .Arg .WriteEvent.Key.Namespace }} AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }} + AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }} ; diff --git a/pkg/storage/unified/sql/list_iterator_test.go b/pkg/storage/unified/sql/list_iterator_test.go index f9119b91b61..7edafd675a8 100644 --- a/pkg/storage/unified/sql/list_iterator_test.go +++ b/pkg/storage/unified/sql/list_iterator_test.go @@ -87,14 +87,23 @@ func TestIntegrationListIter(t *testing.T) { Group: item.group, Name: item.name, }, - Value: item.value, - PreviousRV: 0, + Value: item.value, }, }) if err != nil { return fmt.Errorf("failed to insert test data: %w", err) } - _, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{ + + if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdateRV, sqlResourceUpdateRVRequest{ + SQLTemplate: sqltemplate.New(dialect), + GUIDToRV: map[string]int64{ + item.guid: item.resourceVersion, + }, + }); err != nil { + return fmt.Errorf("failed to insert test data: %w", err) + } + + if _, err = dbutil.Exec(ctx, tx, 
sqlResourceUpdate, sqlResourceRequest{ SQLTemplate: sqltemplate.New(dialect), GUID: item.guid, ResourceVersion: item.resourceVersion, @@ -110,8 +119,7 @@ func TestIntegrationListIter(t *testing.T) { PreviousRV: item.resourceVersion, Type: 1, }, - }) - if err != nil { + }); err != nil { return fmt.Errorf("failed to insert resource version: %w", err) } } diff --git a/pkg/storage/unified/sql/queries_test.go b/pkg/storage/unified/sql/queries_test.go index 8cb7a328714..e8fdb31f084 100644 --- a/pkg/storage/unified/sql/queries_test.go +++ b/pkg/storage/unified/sql/queries_test.go @@ -31,6 +31,21 @@ func TestUnifiedStorageQueries(t *testing.T) { }, }, }, + { + Name: "with rv", + Data: &sqlResourceRequest{ + SQLTemplate: mocks.NewTestingSQLTemplate(), + WriteEvent: resource.WriteEvent{ + Key: &resourcepb.ResourceKey{ + Namespace: "nn", + Group: "gg", + Resource: "rr", + Name: "name", + }, + PreviousRV: 1234, + }, + }, + }, }, sqlResourceInsert: { { @@ -63,6 +78,7 @@ func TestUnifiedStorageQueries(t *testing.T) { Resource: "rr", Name: "name", }, + PreviousRV: 1759304090100678, }, Folder: "fldr", }, diff --git a/pkg/storage/unified/sql/rv_manager.go b/pkg/storage/unified/sql/rv_manager.go index 0ffce47b259..4c6a092d937 100644 --- a/pkg/storage/unified/sql/rv_manager.go +++ b/pkg/storage/unified/sql/rv_manager.go @@ -263,7 +263,7 @@ func (m *resourceVersionManager) execBatch(ctx context.Context, group, resource attribute.Int("operation_index", i), attribute.String("error", err.Error()), )) - return fmt.Errorf("failed to execute function: %w", err) + return err } guids[i] = guid } diff --git a/pkg/storage/unified/sql/service.go b/pkg/storage/unified/sql/service.go index 6dd851ad74f..79d4615f53c 100644 --- a/pkg/storage/unified/sql/service.go +++ b/pkg/storage/unified/sql/service.go @@ -6,10 +6,12 @@ import ( "fmt" "hash/fnv" "net" + "net/http" "os" "strconv" "time" + "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" 
"github.com/prometheus/client_golang/prometheus/promauto" "go.opentelemetry.io/otel" @@ -94,6 +96,7 @@ func ProvideUnifiedStorageGrpcService( indexMetrics *resource.BleveIndexMetrics, searchRing *ring.Ring, memberlistKVConfig kv.Config, + httpServerRouter *mux.Router, ) (UnifiedStorageGrpcService, error) { var err error tracer := otel.Tracer("unified-storage") @@ -159,6 +162,10 @@ func ProvideUnifiedStorageGrpcService( s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true) subservices = append(subservices, s.ringLifecycler) + + if httpServerRouter != nil { + httpServerRouter.Path("/prepare-downscale").Methods("GET", "POST", "DELETE").Handler(http.HandlerFunc(s.PrepareDownscale)) + } } if cfg.QOSEnabled { @@ -194,6 +201,21 @@ func ProvideUnifiedStorageGrpcService( return s, nil } +func (s *service) PrepareDownscale(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodPost: + s.log.Info("Preparing for downscale. Will not keep instance in ring on shutdown.") + s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(false) + case http.MethodDelete: + s.log.Info("Downscale canceled. Will keep instance in ring on shutdown.") + s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true) + case http.MethodGet: + // used for delayed downscale use case, which we don't support. Leaving here for completion sake + s.log.Info("Received GET request for prepare-downscale. 
Behavior not implemented.") + default: + } +} + var ( // operation used by the search-servers to check if they own the namespace searchOwnerRead = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE, ring.LEAVING}, nil) diff --git a/pkg/storage/unified/sql/test/integration_test.go b/pkg/storage/unified/sql/test/integration_test.go index c5420b71280..347a28c4f11 100644 --- a/pkg/storage/unified/sql/test/integration_test.go +++ b/pkg/storage/unified/sql/test/integration_test.go @@ -128,7 +128,7 @@ func TestClientServer(t *testing.T) { features := featuremgmt.WithFeatures() - svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}) + svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil) require.NoError(t, err) var client resourcepb.ResourceStoreClient diff --git a/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql b/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql index a44f4bb6cc7..80f5c76156a 100755 --- a/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql +++ b/pkg/storage/unified/sql/testdata/mysql--resource_delete-simple.sql @@ -4,4 +4,5 @@ DELETE FROM `resource` AND `group` = 'gg' AND `resource` = 'rr' AND `name` = 'name' + AND `resource_version` = 0 ; diff --git a/pkg/storage/unified/sql/testdata/mysql--resource_delete-with rv.sql b/pkg/storage/unified/sql/testdata/mysql--resource_delete-with rv.sql new file mode 100755 index 00000000000..f97a8f2867d --- /dev/null +++ b/pkg/storage/unified/sql/testdata/mysql--resource_delete-with rv.sql @@ -0,0 +1,8 @@ +DELETE FROM `resource` + WHERE 1 = 1 + AND `namespace` = 'nn' + AND `group` = 'gg' + AND `resource` = 'rr' + AND `name` = 'name' + AND `resource_version` = 1234 +; diff --git a/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql 
b/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql index e54cda2c0d7..e28f089b91c 100755 --- a/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql +++ b/pkg/storage/unified/sql/testdata/mysql--resource_update-single path.sql @@ -10,4 +10,5 @@ UPDATE `resource` AND `resource` = 'rr' AND `namespace` = 'nn' AND `name` = 'name' + AND `resource_version` = 1759304090100678 ; diff --git a/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql b/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql index f7fb550b42b..3070bdf23f9 100755 --- a/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql +++ b/pkg/storage/unified/sql/testdata/postgres--resource_delete-simple.sql @@ -4,4 +4,5 @@ DELETE FROM "resource" AND "group" = 'gg' AND "resource" = 'rr' AND "name" = 'name' + AND "resource_version" = 0 ; diff --git a/pkg/storage/unified/sql/testdata/postgres--resource_delete-with rv.sql b/pkg/storage/unified/sql/testdata/postgres--resource_delete-with rv.sql new file mode 100755 index 00000000000..da4c4cfe65d --- /dev/null +++ b/pkg/storage/unified/sql/testdata/postgres--resource_delete-with rv.sql @@ -0,0 +1,8 @@ +DELETE FROM "resource" + WHERE 1 = 1 + AND "namespace" = 'nn' + AND "group" = 'gg' + AND "resource" = 'rr' + AND "name" = 'name' + AND "resource_version" = 1234 +; diff --git a/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql b/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql index 2b549ed8925..bedf09f15e0 100755 --- a/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql +++ b/pkg/storage/unified/sql/testdata/postgres--resource_update-single path.sql @@ -10,4 +10,5 @@ UPDATE "resource" AND "resource" = 'rr' AND "namespace" = 'nn' AND "name" = 'name' + AND "resource_version" = 1759304090100678 ; diff --git a/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql 
b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql index f7fb550b42b..3070bdf23f9 100755 --- a/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql +++ b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-simple.sql @@ -4,4 +4,5 @@ DELETE FROM "resource" AND "group" = 'gg' AND "resource" = 'rr' AND "name" = 'name' + AND "resource_version" = 0 ; diff --git a/pkg/storage/unified/sql/testdata/sqlite--resource_delete-with rv.sql b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-with rv.sql new file mode 100755 index 00000000000..da4c4cfe65d --- /dev/null +++ b/pkg/storage/unified/sql/testdata/sqlite--resource_delete-with rv.sql @@ -0,0 +1,8 @@ +DELETE FROM "resource" + WHERE 1 = 1 + AND "namespace" = 'nn' + AND "group" = 'gg' + AND "resource" = 'rr' + AND "name" = 'name' + AND "resource_version" = 1234 +; diff --git a/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql b/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql index 2b549ed8925..bedf09f15e0 100755 --- a/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql +++ b/pkg/storage/unified/sql/testdata/sqlite--resource_update-single path.sql @@ -10,4 +10,5 @@ UPDATE "resource" AND "resource" = 'rr' AND "namespace" = 'nn' AND "name" = 'name' + AND "resource_version" = 1759304090100678 ; diff --git a/pkg/storage/unified/testing/search_and_storage.go b/pkg/storage/unified/testing/search_and_storage.go index 5cb4d13488c..fa2e438a377 100644 --- a/pkg/storage/unified/testing/search_and_storage.go +++ b/pkg/storage/unified/testing/search_and_storage.go @@ -226,6 +226,29 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource require.NoError(t, err) require.NotNil(t, searchResp) require.Nil(t, searchResp.Error) + // finding a document by its tag using the query field is not supported anymore, so should return nothing here + // https://github.com/grafana/grafana/pull/111842 + 
require.Equal(t, int64(0), searchResp.TotalHits) + + // this is the correct way of searching by tag + searchResp, err = server.Search(ctx, &resourcepb.ResourceSearchRequest{ + Options: &resourcepb.ListOptions{ + Key: &resourcepb.ResourceKey{ + Group: "test.grafana.app", + Resource: "testresources", + Namespace: nsPrefix, + }, + Fields: []*resourcepb.Requirement{{ + Key: "tags", + Operator: "=", + Values: []string{"hello"}, + }}, + }, + Limit: 10, + }) + require.NoError(t, err) + require.NotNil(t, searchResp) + require.Nil(t, searchResp.Error) require.Equal(t, int64(3), searchResp.TotalHits) }) @@ -244,6 +267,29 @@ func RunTestSearchAndStorage(t *testing.T, ctx context.Context, backend resource require.NoError(t, err) require.NotNil(t, searchResp) require.Nil(t, searchResp.Error) + // finding a document by its tag using the query field is not supported anymore, so should return nothing here + // https://github.com/grafana/grafana/pull/111842 + require.Equal(t, int64(0), searchResp.TotalHits) + + // this is the correct way of searching by tag + searchResp, err = server.Search(ctx, &resourcepb.ResourceSearchRequest{ + Options: &resourcepb.ListOptions{ + Key: &resourcepb.ResourceKey{ + Group: "test.grafana.app", + Resource: "testresources", + Namespace: nsPrefix, + }, + Fields: []*resourcepb.Requirement{{ + Key: "tags", + Operator: "=", + Values: []string{"tag1"}, + }}, + }, + Limit: 10, + }) + require.NoError(t, err) + require.NotNil(t, searchResp) + require.Nil(t, searchResp.Error) require.Equal(t, int64(1), searchResp.TotalHits) }) } diff --git a/pkg/storage/unified/testing/search_backend.go b/pkg/storage/unified/testing/search_backend.go index 301a3a202e4..4ef4b6d2753 100644 --- a/pkg/storage/unified/testing/search_backend.go +++ b/pkg/storage/unified/testing/search_backend.go @@ -59,12 +59,11 @@ func runTestSearchBackendBuildIndex(t *testing.T, backend resource.SearchBackend } // Get the index should return nil if the index does not exist - index, err := 
backend.GetIndex(ctx, ns) - require.NoError(t, err) + index := backend.GetIndex(ns) require.Nil(t, index) // Build the index - index, err = backend.BuildIndex(ctx, ns, 0, nil, "test", func(index resource.ResourceIndex) (int64, error) { + index, err := backend.BuildIndex(ctx, ns, 0, nil, "test", func(index resource.ResourceIndex) (int64, error) { // Write a test document err := index.BulkIndex(&resource.BulkIndexRequest{ Items: []*resource.BulkIndexItem{ @@ -91,8 +90,7 @@ func runTestSearchBackendBuildIndex(t *testing.T, backend resource.SearchBackend require.NotNil(t, index) // Get the index should now return the index - index, err = backend.GetIndex(ctx, ns) - require.NoError(t, err) + index = backend.GetIndex(ns) require.NotNil(t, index) } @@ -164,14 +162,18 @@ func runTestResourceIndex(t *testing.T, backend resource.SearchBackend, nsPrefix Group: ns.Group, Resource: ns.Resource, }, + Fields: []*resourcepb.Requirement{{ + Key: "tags", + Operator: "=", + Values: []string{"tag3"}, + }}, }, Fields: []string{"title", "folder", "tags"}, - Query: "tag3", Limit: 10, }, nil) require.NoError(t, err) require.NotNil(t, resp) - require.Equal(t, int64(1), resp.TotalHits) // Only doc3 should have tag3 now + require.Equal(t, int64(1), resp.TotalHits) // Only doc2 should have tag3 now // Search for Document resp, err = index.Search(ctx, nil, &resourcepb.ResourceSearchRequest{ diff --git a/pkg/storage/unified/testing/server.go b/pkg/storage/unified/testing/server.go index 5916ee3cbb4..a83a7d13564 100644 --- a/pkg/storage/unified/testing/server.go +++ b/pkg/storage/unified/testing/server.go @@ -125,12 +125,15 @@ func runTestResourcePermissionScenarios(t *testing.T, backend resource.StorageBa resourceUID := fmt.Sprintf("test123-%d", i) // Create a mock access client with the test case's permission map - checksPerformed := []types.CheckRequest{} + checksPerformed := []CheckRequestEX{} mockAccess := &mockAccessClient{ allowed: false, // Default to false allowedMap: tc.permissionMap, 
- checkFn: func(req types.CheckRequest) { - checksPerformed = append(checksPerformed, req) + checkFn: func(req types.CheckRequest, folder string) { + checksPerformed = append(checksPerformed, CheckRequestEX{ + CheckRequest: req, + Folder: folder, + }) }, } @@ -167,7 +170,7 @@ func runTestResourcePermissionScenarios(t *testing.T, backend resource.StorageBa } }`, resourceName, resourceUID, nsPrefix+"-ns1", tc.initialFolder, i) - checksPerformed = []types.CheckRequest{} + checksPerformed = []CheckRequestEX{} created, err := server.Create(ctx, &resourcepb.CreateRequest{ Value: []byte(resourceJSON), Key: key, @@ -232,7 +235,7 @@ func runTestResourcePermissionScenarios(t *testing.T, backend resource.StorageBa }`, resourceName, resourceUID, nsPrefix+"-ns1", tc.targetFolder, i) mockAccess.allowed = false // Reset to use the map - checksPerformed = []types.CheckRequest{} + checksPerformed = []CheckRequestEX{} updated, err := server.Update(ctx, &resourcepb.UpdateRequest{ Key: key, @@ -494,18 +497,18 @@ func runTestListTrashAccessControl(t *testing.T, backend resource.StorageBackend type mockAccessClient struct { allowed bool allowedMap map[string]bool - checkFn func(types.CheckRequest) + checkFn func(types.CheckRequest, string) compileFn func(user types.AuthInfo, req types.ListRequest) types.ItemChecker } -func (m *mockAccessClient) Check(ctx context.Context, user types.AuthInfo, req types.CheckRequest) (types.CheckResponse, error) { +func (m *mockAccessClient) Check(ctx context.Context, user types.AuthInfo, req types.CheckRequest, folder string) (types.CheckResponse, error) { if m.checkFn != nil { - m.checkFn(req) + m.checkFn(req, folder) } // Check specific folder:verb mappings if provided if m.allowedMap != nil { - key := fmt.Sprintf("%s:%s", req.Folder, req.Verb) + key := fmt.Sprintf("%s:%s", folder, req.Verb) if allowed, exists := m.allowedMap[key]; exists { return types.CheckResponse{Allowed: allowed}, nil } @@ -526,3 +529,8 @@ func (m *mockAccessClient) Compile(ctx 
context.Context, user types.AuthInfo, req return m.allowed }, types.NoopZookie{}, nil } + +type CheckRequestEX struct { + types.CheckRequest + Folder string +} diff --git a/pkg/storage/unified/testing/storage_backend.go b/pkg/storage/unified/testing/storage_backend.go index 1a13c310edd..0dde7344f18 100644 --- a/pkg/storage/unified/testing/storage_backend.go +++ b/pkg/storage/unified/testing/storage_backend.go @@ -124,13 +124,13 @@ func runTestIntegrationBackendHappyPath(t *testing.T, backend resource.StorageBa }) t.Run("Update item2", func(t *testing.T) { - rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2)) require.NoError(t, err) require.Greater(t, rv4, rv3) }) t.Run("Delete item1", func(t *testing.T) { - rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rv5, rv4) }) @@ -352,10 +352,10 @@ func runTestIntegrationBackendList(t *testing.T, backend resource.StorageBackend rv5, err := writeEvent(ctx, backend, "item5", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv5, rv4) - rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2)) require.NoError(t, err) require.Greater(t, rv6, rv5) - rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv3)) require.NoError(t, err) require.Greater(t, rv7, rv6) rv8, err := writeEvent(ctx, backend, "item6", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) @@ 
-490,10 +490,10 @@ func runTestIntegrationBackendListModifiedSince(t *testing.T, backend resource.S ns := nsPrefix + "-history-ns" rvCreated, _ := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.Greater(t, rvCreated, int64(0)) - rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvCreated)) require.NoError(t, err) require.Greater(t, rvUpdated, rvCreated) - rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rvUpdated)) require.NoError(t, err) require.Greater(t, rvDeleted, rvUpdated) @@ -610,19 +610,19 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage require.Greater(t, rv1, int64(0)) // add 5 events for item1 - should be saved to history - rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rvHistory1, rv1) - rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory1)) require.NoError(t, err) require.Greater(t, rvHistory2, rvHistory1) - rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory2)) require.NoError(t, err) require.Greater(t, rvHistory3, rvHistory2) - rvHistory4, err := writeEvent(ctx, backend, "item1", 
resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory3)) require.NoError(t, err) require.Greater(t, rvHistory4, rvHistory3) - rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory4)) require.NoError(t, err) require.Greater(t, rvHistory5, rvHistory4) @@ -804,8 +804,9 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage resourceVersions = append(resourceVersions, initialRV) // Create 9 more versions with modifications + rv := initialRV for i := 0; i < 9; i++ { - rv, err := writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns2)) + rv, err = writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns2, rv)) require.NoError(t, err) resourceVersions = append(resourceVersions, rv) } @@ -907,7 +908,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage // Create a resource and delete it rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) - rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv)) require.NoError(t, err) require.Greater(t, rvDeleted, rv) @@ -932,7 +933,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage // Create a resource and delete it rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) - rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + 
rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv)) require.NoError(t, err) require.Greater(t, rvDeleted, rv) @@ -940,7 +941,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage rv1, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv1, rvDeleted) - rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns)) + rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rv2, rv1) @@ -983,8 +984,8 @@ func runTestIntegrationBackendListHistoryErrorReporting(t *testing.T, backend re const events = 500 prevRv := origRv - for i := 0; i < events; i++ { - rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespace(ns), WithGroup(group), WithResource(resourceName)) + for range events { + rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, prevRv), WithGroup(group), WithResource(resourceName)) require.NoError(t, err) require.Greater(t, rv, prevRv) prevRv = rv @@ -1029,9 +1030,9 @@ func runTestIntegrationBlobSupport(t *testing.T, backend resource.StorageBackend t.Run("put and fetch blob", func(t *testing.T) { key := &resourcepb.ResourceKey{ Namespace: ns, - Group: "g", - Resource: "r", - Name: "n", + Group: "ggg", + Resource: "rrr", + Name: "nnn", } b1, err := server.PutBlob(ctx, &resourcepb.PutBlobRequest{ Resource: key, @@ -1131,6 +1132,14 @@ func runTestIntegrationBackendCreateNewResource(t *testing.T, backend resource.S // WriteEventOption is a function that modifies WriteEventOptions type WriteEventOption func(*WriteEventOptions) +// WithNamespace sets the namespace for the write event +func WithNamespaceAndRV(namespace string, rv int64) WriteEventOption { + return func(o 
*WriteEventOptions) { + o.Namespace = namespace + o.PreviousRV = rv + } +} + // WithNamespace sets the namespace for the write event func WithNamespace(namespace string) WriteEventOption { return func(o *WriteEventOptions) { @@ -1180,11 +1189,12 @@ func WithValue(value string) WriteEventOption { } type WriteEventOptions struct { - Namespace string - Group string - Resource string - Folder string - Value []byte + Namespace string + Group string + Resource string + Folder string + Value []byte + PreviousRV int64 } func writeEvent(ctx context.Context, store resource.StorageBackend, name string, action resourcepb.WatchEvent_Type, opts ...WriteEventOption) (int64, error) { @@ -1236,6 +1246,7 @@ func writeEvent(ctx context.Context, store resource.StorageBackend, name string, Resource: options.Resource, Name: name, }, + PreviousRV: options.PreviousRV, } switch action { case resourcepb.WatchEvent_DELETED: @@ -1285,18 +1296,15 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken rv1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv1, int64(0)) - rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1)) require.NoError(t, err) require.Greater(t, rvDelete1, rv1) - rvDelete2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) - require.NoError(t, err) - require.Greater(t, rvDelete2, rvDelete1) // item2 deleted and recreated, should not be returned in trash rv2, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) require.NoError(t, err) require.Greater(t, rv2, int64(0)) - rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespace(ns)) + rvDelete3, err := writeEvent(ctx, backend, "item2", 
resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv2)) require.NoError(t, err) require.Greater(t, rvDelete3, rv2) rv3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns)) @@ -1325,10 +1333,10 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken }, }, }, - expectedVersions: []int64{rvDelete2}, + expectedVersions: []int64{rvDelete1}, expectedValues: []string{"item1 DELETED"}, - minExpectedHeadRV: rvDelete2, - expectedContinueRV: rvDelete2, + minExpectedHeadRV: rvDelete1, + expectedContinueRV: rvDelete1, expectedSortAsc: false, }, { diff --git a/pkg/tests/api/alerting/test-data/provisioning-mixed-set.yaml b/pkg/tests/api/alerting/test-data/provisioning-mixed-set.yaml index b8845d29548..41b63fc2ed6 100644 --- a/pkg/tests/api/alerting/test-data/provisioning-mixed-set.yaml +++ b/pkg/tests/api/alerting/test-data/provisioning-mixed-set.yaml @@ -144,16 +144,17 @@ policies: - label_keys_not_$$escaped # a list of prometheus-like matchers that an alert rule has to fulfill to match the node (allowed chars # [a-zA-Z_:]) - matchers: - - alertname = Watchdog - - service_id_X = serviceX - - severity =~ "warning|critical" - # a list of grafana-like matchers that an alert rule has to fulfill to match the node - object_matchers: - - ["alertname", "=", "CPUUsage"] - - ["service_id-X", "=", "serviceX"] - - ["severity", "=~", "warning|critical"] group_wait: 30s group_interval: 5m repeat_interval: 4h - routes: [] + routes: + - matchers: + - alertname = Watchdog + - service_id_X = serviceX + - severity =~ "warning|critical" + # a list of grafana-like matchers that an alert rule has to fulfill to match the node + object_matchers: + - [ "alertname", "=", "CPUUsage" ] + - [ "service_id-X", "=", "serviceX" ] + - [ "severity", "=~", "warning|critical" ] + diff --git a/pkg/tests/api/graphite/graphite_test.go b/pkg/tests/api/graphite/graphite_test.go index 29949eb535b..9dd44d47f72 100644 --- 
a/pkg/tests/api/graphite/graphite_test.go +++ b/pkg/tests/api/graphite/graphite_test.go @@ -85,7 +85,7 @@ func TestIntegrationGraphite(t *testing.T) { // nolint:gosec resp, err := http.Post(u, "application/json", buf1) require.NoError(t, err) - require.Equal(t, http.StatusInternalServerError, resp.StatusCode) + require.Equal(t, http.StatusBadRequest, resp.StatusCode) t.Cleanup(func() { err := resp.Body.Close() require.NoError(t, err) diff --git a/pkg/tests/api/plugins/data/expectedListResp.json b/pkg/tests/api/plugins/data/expectedListResp.json index ae511c7eb77..78d56e06675 100644 --- a/pkg/tests/api/plugins/data/expectedListResp.json +++ b/pkg/tests/api/plugins/data/expectedListResp.json @@ -209,7 +209,7 @@ "path": "public/plugins/grafana-azure-monitor-datasource/img/azure_monitor_cpu.png" } ], - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": [ "azure", @@ -880,7 +880,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -934,7 +934,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": [ "grafana", @@ -1000,7 +1000,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1217,7 +1217,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1325,7 +1325,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1375,7 +1375,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1425,7 +1425,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -1629,7 +1629,7 @@ }, "build": {}, "screenshots": null, - 
"version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": [ "grafana", @@ -1734,12 +1734,12 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, "dependencies": { - "grafanaDependency": "", + "grafanaDependency": "\u003e=11.6.0", "grafanaVersion": "*", "plugins": [], "extensions": { @@ -2042,7 +2042,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -2092,7 +2092,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, @@ -2445,7 +2445,7 @@ }, "build": {}, "screenshots": null, - "version": "12.2.0-pre", + "version": "12.3.0-pre", "updated": "", "keywords": null }, diff --git a/pkg/tests/apis/alerting/notifications/receivers/receiver_test.go b/pkg/tests/apis/alerting/notifications/receivers/receiver_test.go index 1d34b5106c8..da834b690e8 100644 --- a/pkg/tests/apis/alerting/notifications/receivers/receiver_test.go +++ b/pkg/tests/apis/alerting/notifications/receivers/receiver_test.go @@ -9,10 +9,11 @@ import ( "net/http" "path" "slices" - "sort" "strings" "testing" + "github.com/grafana/alerting/notify/notifytest" + "github.com/grafana/alerting/receivers/line" "github.com/grafana/alerting/receivers/schema" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -1225,7 +1226,7 @@ func TestIntegrationCRUD(t *testing.T) { t.Run("should be able to update default receiver", func(t *testing.T) { require.NotNil(t, defaultReceiver) newDefault := defaultReceiver.Copy().(*v0alpha1.Receiver) - newDefault.Spec.Integrations = append(newDefault.Spec.Integrations, createIntegration(t, "line")) + newDefault.Spec.Integrations = append(newDefault.Spec.Integrations, createIntegration(t, line.Type)) updatedReceiver, err := adminClient.Update(ctx, newDefault, v1.UpdateOptions{}) require.NoError(t, err) @@ -1266,10 
+1267,10 @@ func TestIntegrationCRUD(t *testing.T) { var receiver *v0alpha1.Receiver t.Run("should correctly persist all known integrations", func(t *testing.T) { - integrations := make([]v0alpha1.ReceiverIntegration, 0, len(notify.AllKnownConfigsForTesting)) - keysIter := maps.Keys(notify.AllKnownConfigsForTesting) + integrations := make([]v0alpha1.ReceiverIntegration, 0, len(notifytest.AllKnownV1ConfigsForTesting)) + keysIter := maps.Keys(notifytest.AllKnownV1ConfigsForTesting) keys := slices.Collect(keysIter) - sort.Strings(keys) + slices.Sort(keys) for _, key := range keys { integrations = append(integrations, createIntegration(t, key)) } @@ -1300,7 +1301,7 @@ func TestIntegrationCRUD(t *testing.T) { export := legacyCli.ExportReceiverTyped(t, receiver.Spec.Title, true) for _, integration := range export.Receivers { - expected := notify.AllKnownConfigsForTesting[strings.ToLower(integration.Type)] // to lower because there is LINE that is in different casing in API + expected := notifytest.AllKnownV1ConfigsForTesting[schema.IntegrationType(integration.Type)] assert.JSONEqf(t, expected.Config, string(integration.Settings), "integration %s", integration.Type) } }) @@ -1313,7 +1314,7 @@ func TestIntegrationCRUD(t *testing.T) { for _, integration := range get.Spec.Integrations { integrationType := schema.IntegrationType(integration.Type) t.Run(integration.Type, func(t *testing.T) { - expected := notify.AllKnownConfigsForTesting[strings.ToLower(integration.Type)] + expected := notifytest.AllKnownV1ConfigsForTesting[schema.IntegrationType(integration.Type)] var fields map[string]any require.NoError(t, json.Unmarshal([]byte(expected.Config), &fields)) typeSchema, ok := notify.GetSchemaVersionForIntegration(integrationType, schema.V1) @@ -1336,11 +1337,11 @@ func TestIntegrationCRUD(t *testing.T) { }) t.Run("should fail to persist receiver with invalid config", func(t *testing.T) { - keysIter := maps.Keys(notify.AllKnownConfigsForTesting) + keysIter := 
maps.Keys(notifytest.AllKnownV1ConfigsForTesting) keys := slices.Collect(keysIter) - sort.Strings(keys) + slices.Sort(keys) for _, key := range keys { - t.Run(key, func(t *testing.T) { + t.Run(string(key), func(t *testing.T) { integration := createIntegration(t, key) // Make the integration invalid, so it fails to create. This is usually done by sending empty settings. clear(integration.Settings) @@ -1503,18 +1504,18 @@ func persistInitialConfig(t *testing.T, amConfig definitions.PostableUserConfig) require.NoError(t, err) } -func createIntegration(t *testing.T, integrationType string) v0alpha1.ReceiverIntegration { - cfg, ok := notify.AllKnownConfigsForTesting[integrationType] +func createIntegration(t *testing.T, integrationType schema.IntegrationType) v0alpha1.ReceiverIntegration { + cfg, ok := notifytest.AllKnownV1ConfigsForTesting[integrationType] require.Truef(t, ok, "no known config for integration type %s", integrationType) - return createIntegrationWithSettings(t, integrationType, "v1", cfg.Config) + return createIntegrationWithSettings(t, integrationType, schema.V1, cfg.Config) } -func createIntegrationWithSettings(t *testing.T, integrationType string, integrationVersion string, settingsJson string) v0alpha1.ReceiverIntegration { +func createIntegrationWithSettings(t *testing.T, integrationType schema.IntegrationType, integrationVersion schema.Version, settingsJson string) v0alpha1.ReceiverIntegration { settings := common.Unstructured{} require.NoError(t, settings.UnmarshalJSON([]byte(settingsJson))) return v0alpha1.ReceiverIntegration{ Settings: settings.Object, - Type: integrationType, - Version: integrationVersion, + Type: string(integrationType), + Version: string(integrationVersion), DisableResolveMessage: util.Pointer(false), } } diff --git a/pkg/tests/apis/dashboard/dashboards_test.go b/pkg/tests/apis/dashboard/dashboards_test.go index 930356e18c1..e6739330286 100644 --- a/pkg/tests/apis/dashboard/dashboards_test.go +++ 
b/pkg/tests/apis/dashboard/dashboards_test.go @@ -15,6 +15,10 @@ import ( "k8s.io/client-go/dynamic" k8srest "k8s.io/client-go/rest" + dashboardV0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1" + dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1" + dashboardV2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1" + dashboardV2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1" "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apiserver/rest" @@ -24,11 +28,6 @@ import ( "github.com/grafana/grafana/pkg/tests/apis" "github.com/grafana/grafana/pkg/tests/testinfra" "github.com/grafana/grafana/pkg/tests/testsuite" - - dashboardV0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1" - dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1" - dashboardV2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1" - dashboardV2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1" "github.com/grafana/grafana/pkg/util/testutil" ) diff --git a/pkg/tests/apis/folder/folders_test.go b/pkg/tests/apis/folder/folders_test.go index d7f64aea418..3449f50372c 100644 --- a/pkg/tests/apis/folder/folders_test.go +++ b/pkg/tests/apis/folder/folders_test.go @@ -8,7 +8,9 @@ import ( "net/http" "slices" "testing" + "time" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -18,14 +20,17 @@ import ( folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1" "github.com/grafana/grafana/pkg/api/dtos" grafanarest "github.com/grafana/grafana/pkg/apiserver/rest" + "github.com/grafana/grafana/pkg/expr" "github.com/grafana/grafana/pkg/infra/db" 
"github.com/grafana/grafana/pkg/services/accesscontrol/resourcepermissions" "github.com/grafana/grafana/pkg/services/dashboards" "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/folder" + apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions" "github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/user" "github.com/grafana/grafana/pkg/setting" + alerting "github.com/grafana/grafana/pkg/tests/api/alerting" "github.com/grafana/grafana/pkg/tests/apis" "github.com/grafana/grafana/pkg/tests/testinfra" "github.com/grafana/grafana/pkg/tests/testsuite" @@ -223,6 +228,89 @@ func TestIntegrationFoldersApp(t *testing.T) { }) } +// Validates that folder delete checks alert_rule stats and blocks deletion +func TestIntegrationFolderDeletionBlockedByAlertRules(t *testing.T) { + testutil.SkipIntegrationTestInShortMode(t) + + if !db.IsTestDbSQLite() { + t.Skip("test only on sqlite for now") + } + + t.Run("should be blocked by alert rules", func(t *testing.T) { + helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{ + AppModeProduction: true, + DisableAnonymous: true, + APIServerStorageType: "unified", + UnifiedStorageConfig: map[string]setting.UnifiedStorageConfig{ + folders.RESOURCEGROUP: {DualWriterMode: grafanarest.Mode5}, + }, + EnableFeatureToggles: []string{ + featuremgmt.FlagUnifiedStorageSearch, + }, + }) + + client := helper.GetResourceClient(apis.ResourceClientArgs{ + User: helper.Org1.Admin, + GVR: gvr, + }) + + // Create a folder via legacy API so it is visible everywhere. 
+ folderUID := "alertrule-del-test" + legacyPayload := fmt.Sprintf(`{"title": "Folder With Alert Rule", "uid": "%s"}`, folderUID) + legacyCreate := apis.DoRequest(helper, apis.RequestParams{ + User: client.Args.User, + Method: http.MethodPost, + Path: "/api/folders", + Body: []byte(legacyPayload), + }, &folder.Folder{}) + require.NotNil(t, legacyCreate.Result) + require.Equal(t, folderUID, legacyCreate.Result.UID) + + // Create one alert rule in that folder namespace via ruler API. + addr := helper.GetEnv().Server.HTTPServer.Listener.Addr().String() + api := alerting.NewAlertingLegacyAPIClient(addr, "admin", "admin") + + // simple always-true rule + forDuration := model.Duration(10 * time.Second) + rule := apimodels.PostableExtendedRuleNode{ + ApiRuleNode: &apimodels.ApiRuleNode{For: &forDuration}, + GrafanaManagedAlert: &apimodels.PostableGrafanaRule{ + Title: "rule-in-folder", + Condition: "A", + Data: []apimodels.AlertQuery{ + { + RefID: "A", + DatasourceUID: expr.DatasourceUID, + RelativeTimeRange: apimodels.RelativeTimeRange{ + From: apimodels.Duration(600 * time.Second), + To: 0, + }, + Model: json.RawMessage(`{"type":"math","expression":"2 + 3 > 1"}`), + }, + }, + }, + } + group := apimodels.PostableRuleGroupConfig{ + Name: "arulegroup", + Interval: model.Duration(10 * time.Second), + Rules: []apimodels.PostableExtendedRuleNode{rule}, + } + _ = api.PostRulesGroup(t, folderUID, &group, false) + + // Attempt to delete the folder via K8s API. This should be blocked by alert rules. + err := client.Resource.Delete(context.Background(), folderUID, metav1.DeleteOptions{}) + require.Error(t, err, "expected folder deletion to be blocked when alert rules exist") + + // Delete the rule group from ruler. + status, body := api.DeleteRulesGroup(t, folderUID, group.Name, true) + require.Equalf(t, http.StatusAccepted, status, body) + + // Now we should be able to delete the folder. 
+ err = client.Resource.Delete(context.Background(), folderUID, metav1.DeleteOptions{}) + require.NoError(t, err) + }) +} + func doFolderTests(t *testing.T, helper *apis.K8sTestHelper) *apis.K8sTestHelper { t.Run("Check folder CRUD (just create for now) in legacy API appears in k8s apis", func(t *testing.T) { client := helper.GetResourceClient(apis.ResourceClientArgs{ @@ -1338,3 +1426,78 @@ func TestIntegrationRootFolderDeletionBlockedByLibraryElementsInSubfolder(t *tes }) } } + +// Test moving folders to root. +func TestIntegrationMoveNestedFolderToRootK8S(t *testing.T) { + testutil.SkipIntegrationTestInShortMode(t) + + if !db.IsTestDbSQLite() { + t.Skip("test only on sqlite for now") + } + + helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{ + AppModeProduction: true, + DisableAnonymous: true, + EnableFeatureToggles: []string{featuremgmt.FlagUnifiedStorageSearch}, + APIServerStorageType: "unified", + UnifiedStorageConfig: map[string]setting.UnifiedStorageConfig{ + folders.RESOURCEGROUP: { + DualWriterMode: grafanarest.Mode5, + }, + }, + }) + + client := helper.GetResourceClient(apis.ResourceClientArgs{ + User: helper.Org1.Admin, + GVR: gvr, + }) + + // Create f1 under root + f1Payload := `{"title":"Folder 1","uid":"f1"}` + createF1 := apis.DoRequest(helper, apis.RequestParams{ + User: client.Args.User, + Method: http.MethodPost, + Path: "/api/folders", + Body: []byte(f1Payload), + }, &dtos.Folder{}) + require.NotNil(t, createF1.Result) + require.Equal(t, http.StatusOK, createF1.Response.StatusCode) + require.Equal(t, "f1", createF1.Result.UID) + require.Equal(t, "", createF1.Result.ParentUID) + + // Create f2 under f1 + f2Payload := `{"title":"Folder 2","uid":"f2","parentUid":"f1"}` + createF2 := apis.DoRequest(helper, apis.RequestParams{ + User: client.Args.User, + Method: http.MethodPost, + Path: "/api/folders", + Body: []byte(f2Payload), + }, &dtos.Folder{}) + require.NotNil(t, createF2.Result) + require.Equal(t, http.StatusOK, 
createF2.Response.StatusCode) + require.Equal(t, "f2", createF2.Result.UID) + require.Equal(t, "f1", createF2.Result.ParentUID) + + // Move f2 to the root by having parentUid being empty in the request body + move := apis.DoRequest(helper, apis.RequestParams{ + User: client.Args.User, + Method: http.MethodPost, + Path: "/api/folders/f2/move", + Body: []byte(`{"parentUid":""}`), + }, &dtos.Folder{}) + require.NotNil(t, move.Result) + require.Equal(t, http.StatusOK, move.Response.StatusCode) + require.Equal(t, "f2", move.Result.UID) + require.Equal(t, "", move.Result.ParentUID) + + // Fetch the folder to confirm it is now at root + get := apis.DoRequest(helper, apis.RequestParams{ + User: client.Args.User, + Method: http.MethodGet, + Path: "/api/folders/f2", + }, &dtos.Folder{}) + require.NotNil(t, get.Result) + require.Equal(t, http.StatusOK, get.Response.StatusCode) + require.Equal(t, "f2", get.Result.UID) + require.Equal(t, "", get.Result.ParentUID) +} diff --git a/pkg/tests/apis/iam/iam_test.go b/pkg/tests/apis/iam/iam_test.go index 5ad2eccd8f4..ffd7dedf77c 100644 --- a/pkg/tests/apis/iam/iam_test.go +++ b/pkg/tests/apis/iam/iam_test.go @@ -69,8 +69,7 @@ func TestIntegrationIdentity(t *testing.T) { "title": "staff", "provisioned": false, "externalUID": "" - }, - "status": {} + } } ] }`, found) diff --git a/pkg/tests/apis/iam/team_integration_test.go b/pkg/tests/apis/iam/team_integration_test.go index f6e0a4605f6..e8bb115dfb7 100644 --- a/pkg/tests/apis/iam/team_integration_test.go +++ b/pkg/tests/apis/iam/team_integration_test.go @@ -38,17 +38,18 @@ func TestIntegrationTeams(t *testing.T) { featuremgmt.FlagKubernetesAuthnMutation, }, }) + doTeamCRUDTestsUsingTheNewAPIs(t, helper) if mode < 3 { - doTeamCRUDTestsUsingTheLegacyAPIs(t, helper) + doTeamCRUDTestsUsingTheLegacyAPIs(t, helper, mode) } }) } } func doTeamCRUDTestsUsingTheNewAPIs(t *testing.T, helper *apis.K8sTestHelper) { - t.Run("should create/get/delete team using the new APIs as a GrafanaAdmin", func(t 
*testing.T) { + t.Run("should create/get/update/delete team using the new APIs as a GrafanaAdmin", func(t *testing.T) { ctx := context.Background() teamClient := helper.GetResourceClient(apis.ResourceClientArgs{ @@ -57,6 +58,7 @@ func doTeamCRUDTestsUsingTheNewAPIs(t *testing.T, helper *apis.K8sTestHelper) { GVR: gvrTeams, }) + // Create the team created, err := teamClient.Resource.Create(ctx, helper.LoadYAMLOrJSONFile("testdata/team-test-create-v0.yaml"), metav1.CreateOptions{}) require.NoError(t, err) require.NotNil(t, created) @@ -69,6 +71,7 @@ func doTeamCRUDTestsUsingTheNewAPIs(t *testing.T, helper *apis.K8sTestHelper) { createdUID := created.GetName() require.NotEmpty(t, createdUID) + // Get the team fetched, err := teamClient.Resource.Get(ctx, createdUID, metav1.GetOptions{}) require.NoError(t, err) require.NotNil(t, fetched) @@ -81,6 +84,26 @@ func doTeamCRUDTestsUsingTheNewAPIs(t *testing.T, helper *apis.K8sTestHelper) { require.Equal(t, createdUID, fetched.GetName()) require.Equal(t, "default", fetched.GetNamespace()) + // Update the team + updatedTeam, err := teamClient.Resource.Update(ctx, helper.LoadYAMLOrJSONFile("testdata/team-test-update-v0.yaml"), metav1.UpdateOptions{}) + require.NoError(t, err) + require.NotNil(t, updatedTeam) + + updatedSpec := updatedTeam.Object["spec"].(map[string]interface{}) + require.Equal(t, "Test Team 2", updatedSpec["title"]) + require.Equal(t, "testteam2@example123.com", updatedSpec["email"]) + require.Equal(t, false, updatedSpec["provisioned"]) + + verifiedTeam, err := teamClient.Resource.Get(ctx, createdUID, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, verifiedTeam) + + verifiedSpec := verifiedTeam.Object["spec"].(map[string]interface{}) + require.Equal(t, "Test Team 2", verifiedSpec["title"]) + require.Equal(t, "testteam2@example123.com", verifiedSpec["email"]) + require.Equal(t, false, verifiedSpec["provisioned"]) + + // Delete the team err = teamClient.Resource.Delete(ctx, createdUID, 
metav1.DeleteOptions{}) require.NoError(t, err) @@ -202,12 +225,14 @@ func doTeamCRUDTestsUsingTheNewAPIs(t *testing.T, helper *apis.K8sTestHelper) { }) } -func doTeamCRUDTestsUsingTheLegacyAPIs(t *testing.T, helper *apis.K8sTestHelper) { - t.Run("should create team using legacy APIs and get/delete it using the new APIs", func(t *testing.T) { +func doTeamCRUDTestsUsingTheLegacyAPIs(t *testing.T, helper *apis.K8sTestHelper, mode rest.DualWriterMode) { + t.Run("should create team using legacy APIs and get/update/delete it using the new APIs", func(t *testing.T) { ctx := context.Background() + teamClient := helper.GetResourceClient(apis.ResourceClientArgs{ - User: helper.Org1.Admin, - GVR: gvrTeams, + User: helper.Org1.Admin, + Namespace: helper.Namespacer(helper.Org1.Admin.Identity.GetOrgID()), + GVR: gvrTeams, }) legacyTeamPayload := `{ @@ -243,6 +268,31 @@ func doTeamCRUDTestsUsingTheLegacyAPIs(t *testing.T, helper *apis.K8sTestHelper) require.Equal(t, rsp.Result.UID, team.GetName()) require.Equal(t, "default", team.GetNamespace()) + // Updating the team is not supported in Mode2 if the team has been created using the legacy APIs + if mode < rest.Mode2 { + team.Object["spec"].(map[string]interface{})["title"] = "Updated Test Team 2" + team.Object["spec"].(map[string]interface{})["email"] = "updated@example.com" + + updatedTeam, err := teamClient.Resource.Update(ctx, team, metav1.UpdateOptions{}) + require.NoError(t, err) + require.NotNil(t, updatedTeam) + + updatedSpec := updatedTeam.Object["spec"].(map[string]interface{}) + require.Equal(t, "Updated Test Team 2", updatedSpec["title"]) + require.Equal(t, "updated@example.com", updatedSpec["email"]) + require.Equal(t, false, updatedSpec["provisioned"]) + + verifiedTeam, err := teamClient.Resource.Get(ctx, rsp.Result.UID, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, verifiedTeam) + + verifiedSpec := verifiedTeam.Object["spec"].(map[string]interface{}) + require.Equal(t, "Updated Test Team 2", 
verifiedSpec["title"]) + require.Equal(t, "updated@example.com", verifiedSpec["email"]) + require.Equal(t, false, verifiedSpec["provisioned"]) + } + + // Delete the team err = teamClient.Resource.Delete(ctx, rsp.Result.UID, metav1.DeleteOptions{}) require.NoError(t, err) diff --git a/pkg/tests/apis/iam/testdata/team-test-update-v0.yaml b/pkg/tests/apis/iam/testdata/team-test-update-v0.yaml new file mode 100644 index 00000000000..0b01271cb3f --- /dev/null +++ b/pkg/tests/apis/iam/testdata/team-test-update-v0.yaml @@ -0,0 +1,7 @@ +apiVersion: iam.grafana.app/v0alpha1 +kind: Team +metadata: + name: test-team-1 +spec: + title: "Test Team 2" + email: testteam2@example123.com diff --git a/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json b/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json index a1d5158e140..3561b54c453 100644 --- a/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/iam.grafana.app-v0alpha1.json @@ -3502,8 +3502,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -3530,14 +3529,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -3618,66 +3609,11 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.ServiceAccountstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.Team": { "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -3704,14 +3640,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -3726,8 +3654,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -3754,14 +3681,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -3818,20 +3737,23 @@ "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingSpec": { "type": "object", "required": [ - "subjects", - "teamRef" + "subject", + "teamRef", + "permission" ], "properties": { - "subjects": { - "type": "array", - "items": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingspecSubject" - } - ] - } + "permission": { + "description": "permission of the identity in the team", + "type": "string", + "default": "" + }, + "subject": { + "default": {}, + "allOf": [ + { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingspecSubject" + } + ] }, "teamRef": { "default": {}, @@ -3843,30 +3765,6 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingStatus": { - "type": "object", - "properties": { - 
"additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingstatusOperatorState" - } - ] - } - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingTeamRef": { "type": "object", "required": [ @@ -3883,49 +3781,13 @@ "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingspecSubject": { "type": "object", "required": [ - "name", - "permission" + "name" ], "properties": { "name": { "description": "uid of the identity", "type": "string", "default": "" - }, - "permission": { - "description": "permission of the identity in the team", - "type": "string", - "default": "" - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamBindingstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" } } }, @@ -3999,66 +3861,11 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.TeamstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.User": { "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4085,14 +3892,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -4193,60 +3992,6 @@ } } }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.iam.pkg.apis.iam.v0alpha1.UserstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.pkg.apimachinery.apis.common.v0alpha1.Unstructured": { "type": "object", "additionalProperties": true, @@ -4705,8 +4450,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4723,9 +4467,6 @@ "spec": { "description": "Spec is the spec of the CoreRole", "default": {} - }, - "status": { - "default": {} } } }, @@ -4864,8 +4605,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4882,9 +4622,6 @@ "spec": { "description": "Spec is the spec of the GlobalRole", "default": {} - }, - "status": { - "default": {} } } }, @@ -4892,8 +4629,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -4910,9 +4646,6 @@ "spec": { "description": "Spec is the spec of the GlobalRoleBinding", "default": {} - }, - "status": { - "default": {} } } }, @@ -5182,8 +4915,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5200,9 +4932,6 @@ "spec": { "description": "Spec is the spec of the ResourcePermission", "default": {} - }, - "status": { - "default": {} } } }, @@ -5353,8 +5082,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5371,9 +5099,6 @@ "spec": { "description": "Spec is the spec of the Role", "default": {} - }, - "status": { - "default": {} } } }, @@ -5381,8 +5106,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5399,9 +5123,6 @@ "spec": { "description": "Spec is the spec of the RoleBinding", "default": {} - }, - "status": { - "default": {} } } }, @@ -5671,8 +5392,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { 
"apiVersion": { @@ -5689,9 +5409,6 @@ "spec": { "description": "Spec is the spec of the ServiceAccount", "default": {} - }, - "status": { - "default": {} } } }, @@ -5801,8 +5518,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5819,9 +5535,6 @@ "spec": { "description": "Spec is the spec of the Team", "default": {} - }, - "status": { - "default": {} } } }, @@ -5829,8 +5542,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -5847,9 +5559,6 @@ "spec": { "description": "Spec is the spec of the TeamBinding", "default": {} - }, - "status": { - "default": {} } } }, @@ -5882,15 +5591,18 @@ "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingSpec": { "type": "object", "required": [ - "subjects", - "teamRef" + "subject", + "teamRef", + "permission" ], "properties": { - "subjects": { - "type": "array", - "items": { - "default": {} - } + "permission": { + "description": "permission of the identity in the team", + "type": "string", + "default": "" + }, + "subject": { + "default": {} }, "teamRef": { "default": {} @@ -5932,19 +5644,13 @@ "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamBindingspecSubject": { "type": "object", "required": [ - "name", - "permission" + "name" ], "properties": { "name": { "description": "uid of the identity", "type": "string", "default": "" - }, - "permission": { - "description": "permission of the identity in the team", - "type": "string", - "default": "" } } }, @@ -6084,8 +5790,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -6102,9 +5807,6 @@ "spec": { "description": "Spec is the spec of the User", "default": {} - }, - "status": { - "default": {} } } }, diff --git a/pkg/tests/apis/openapi_snapshots/notifications.alerting.grafana.app-v0alpha1.json 
b/pkg/tests/apis/openapi_snapshots/notifications.alerting.grafana.app-v0alpha1.json index 4816ea0580f..7973627042e 100644 --- a/pkg/tests/apis/openapi_snapshots/notifications.alerting.grafana.app-v0alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/notifications.alerting.grafana.app-v0alpha1.json @@ -4520,6 +4520,12 @@ "schemas": { "com.github.grafana.grafana.apps.alerting.notifications.pkg.apis.alertingnotifications.v0alpha1.Receiver": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -4708,6 +4714,12 @@ }, "com.github.grafana.grafana.apps.alerting.notifications.pkg.apis.alertingnotifications.v0alpha1.RoutingTree": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -4967,6 +4979,12 @@ }, "com.github.grafana.grafana.apps.alerting.notifications.pkg.apis.alertingnotifications.v0alpha1.TemplateGroup": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -5116,6 +5134,12 @@ }, "com.github.grafana.grafana.apps.alerting.notifications.pkg.apis.alertingnotifications.v0alpha1.TimeInterval": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", diff --git a/pkg/tests/apis/openapi_snapshots/playlist.grafana.app-v0alpha1.json b/pkg/tests/apis/openapi_snapshots/playlist.grafana.app-v0alpha1.json index 96ff9719372..dd1b486823e 100644 --- a/pkg/tests/apis/openapi_snapshots/playlist.grafana.app-v0alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/playlist.grafana.app-v0alpha1.json @@ -1160,6 +1160,12 @@ "schemas": { "com.github.grafana.grafana.apps.playlist.pkg.apis.playlist.v0alpha1.Playlist": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", diff --git a/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json b/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json index c5d87c4088d..1b9b680af34 100644 --- a/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/preferences.grafana.app-v1alpha1.json @@ -43,6 +43,98 @@ ], "description": "list objects of kind Preferences", "operationId": "listPreferences", + "parameters": [ + { + "name": "allowWatchBookmarks", + "in": "query", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "continue", + "in": "query", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldSelector", + "in": "query", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "labelSelector", + "in": "query", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "limit", + "in": "query", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "name": "resourceVersion", + "in": "query", + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "resourceVersionMatch", + "in": "query", + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "sendInitialEvents", + "in": "query", + "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "timeoutSeconds", + "in": "query", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "name": "watch", + "in": "query", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ], "responses": { "200": { "description": "OK", @@ -82,52 +174,131 @@ "kind": "Preferences" } }, + "post": { + "tags": [ + "Preferences" + ], + "description": "create Preferences", + "operationId": "createPreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldManager", + "in": "query", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldValidation", + "in": "query", + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "201": { + "description": "Created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "202": { + "description": "Accepted", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + } + }, + "x-kubernetes-action": "post", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, "parameters": [ - { - "name": "allowWatchBookmarks", - "in": "query", - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "name": "continue", - "in": "query", - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "fieldSelector", - "in": "query", - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "labelSelector", - "in": "query", - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "limit", - "in": "query", - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, { "name": "namespace", "in": "path", @@ -146,51 +317,6 @@ "type": "string", "uniqueItems": true } - }, - { - "name": "resourceVersion", - "in": "query", - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "resourceVersionMatch", - "in": "query", - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "name": "sendInitialEvents", - "in": "query", - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. 
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "name": "timeoutSeconds", - "in": "query", - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "name": "watch", - "in": "query", - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "schema": { - "type": "boolean", - "uniqueItems": true - } } ] }, @@ -260,6 +386,330 @@ "kind": "Preferences" } }, + "put": { + "tags": [ + "Preferences" + ], + "description": "replace the specified Preferences", + "operationId": "replacePreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldManager", + "in": "query", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldValidation", + "in": "query", + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "201": { + "description": "Created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + } + }, + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": 
"preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, + "delete": { + "tags": [ + "Preferences" + ], + "description": "delete Preferences", + "operationId": "deletePreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "gracePeriodSeconds", + "in": "query", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "name": "ignoreStoreReadErrorWithClusterBreakingPotential", + "in": "query", + "description": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. 
The default value is false, and the user must opt in to enable it", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "orphanDependents", + "in": "query", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "name": "propagationPolicy", + "in": "query", + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + } + }, + "202": { + "description": "Accepted", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { 
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + } + } + }, + "x-kubernetes-action": "delete", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, + "patch": { + "tags": [ + "Preferences" + ], + "description": "partially update the specified Preferences", + "operationId": "updatePreferences", + "parameters": [ + { + "name": "dryRun", + "in": "query", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldManager", + "in": "query", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "fieldValidation", + "in": "query", + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "name": "force", + "in": "query", + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/apply-patch+yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/json-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/merge-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/strategic-merge-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + }, + "201": { + "description": 
"Created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Preferences" + } + } + } + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "preferences.grafana.app", + "version": "v1alpha1", + "kind": "Preferences" + } + }, "parameters": [ { "name": "name", @@ -1232,8 +1682,7 @@ "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -1260,14 +1709,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -1412,66 +1853,11 @@ } } }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.PreferencesstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.Stars": { "type": "object", "required": [ "metadata", - "spec", - "status" + "spec" ], "properties": { "apiVersion": { @@ -1498,14 +1884,6 @@ "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsSpec" } ] - }, - "status": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsStatus" - } - ] } }, "x-kubernetes-group-version-kind": [ @@ -1605,60 +1983,6 @@ } } }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsStatus": { - "type": "object", - "properties": { - "additionalFields": { - "description": "additionalFields is reserved for future use", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "operatorStates": { - "description": "operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field.", - "type": "object", - "additionalProperties": { - "default": {}, - "allOf": [ - { - "$ref": "#/components/schemas/com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsstatusOperatorState" - } - ] - } - } - } - }, - "com.github.grafana.grafana.apps.preferences.pkg.apis.preferences.v1alpha1.StarsstatusOperatorState": { - "type": "object", - "required": [ - "lastEvaluation", - "state" - ], - "properties": { - "descriptiveState": { - "description": "descriptiveState is an optional more descriptive state field which has no requirements on format", - "type": "string" - }, - "details": { - "description": "details contains any extra information that is operator-specific", - "type": "object", - "additionalProperties": { - "type": "object" - } - }, - "lastEvaluation": { - "description": "lastEvaluation is the ResourceVersion last evaluated", - "type": "string", - "default": "" - }, - "state": { - "description": "state describes the state of the lastEvaluation. 
It is limited to three possible states for machine evaluation.", - "type": "string", - "default": "" - } - } - }, "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { "description": "APIResource specifies the name of a resource and whether it is namespaced.", "type": "object", diff --git a/pkg/tests/apis/openapi_snapshots/provisioning.grafana.app-v0alpha1.json b/pkg/tests/apis/openapi_snapshots/provisioning.grafana.app-v0alpha1.json index 3e2e3a1644b..bdfa6ce9490 100644 --- a/pkg/tests/apis/openapi_snapshots/provisioning.grafana.app-v0alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/provisioning.grafana.app-v0alpha1.json @@ -3706,14 +3706,14 @@ "group": { "type": "string" }, + "kind": { + "type": "string" + }, "noop": { "description": "No action required (useful for sync)", "type": "integer", "format": "int64" }, - "resource": { - "type": "string" - }, "total": { "type": "integer", "format": "int64" diff --git a/pkg/tests/apis/openapi_snapshots/rules.alerting.grafana.app-v0alpha1.json b/pkg/tests/apis/openapi_snapshots/rules.alerting.grafana.app-v0alpha1.json index 0d12c909329..0e646c08d01 100644 --- a/pkg/tests/apis/openapi_snapshots/rules.alerting.grafana.app-v0alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/rules.alerting.grafana.app-v0alpha1.json @@ -2280,6 +2280,12 @@ "schemas": { "com.github.grafana.grafana.apps.alerting.rules.pkg.apis.alerting.v0alpha1.AlertRule": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", @@ -2603,6 +2609,12 @@ }, "com.github.grafana.grafana.apps.alerting.rules.pkg.apis.alerting.v0alpha1.RecordingRule": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", diff --git a/pkg/tests/apis/openapi_snapshots/shorturl.grafana.app-v1alpha1.json b/pkg/tests/apis/openapi_snapshots/shorturl.grafana.app-v1alpha1.json index 9807d9c13b9..574ce2f474a 100644 --- a/pkg/tests/apis/openapi_snapshots/shorturl.grafana.app-v1alpha1.json +++ b/pkg/tests/apis/openapi_snapshots/shorturl.grafana.app-v1alpha1.json @@ -1220,6 +1220,12 @@ }, "com.github.grafana.grafana.apps.shorturl.pkg.apis.shorturl.v1alpha1.ShortURL": { "type": "object", + "required": [ + "kind", + "apiVersion", + "metadata", + "spec" + ], "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", diff --git a/pkg/tests/apis/preferences/preferences_test.go b/pkg/tests/apis/preferences/preferences_test.go index 01caaae8318..5de09e16fa8 100644 --- a/pkg/tests/apis/preferences/preferences_test.go +++ b/pkg/tests/apis/preferences/preferences_test.go @@ -85,6 +85,8 @@ func TestIntegrationPreferences(t *testing.T) { }, &raw) require.Equal(t, http.StatusOK, legacyResponse.Response.StatusCode, "create preference for user") + adminPrefsName := "user-" + clientAdmin.Args.User.Identity.GetIdentifier() + // Admin has access to all three (namespace, team, and user) rsp, err = clientAdmin.Resource.List(ctx, metav1.ListOptions{}) require.NoError(t, err) @@ -95,9 +97,31 @@ func TestIntegrationPreferences(t *testing.T) { require.Equal(t, []string{ "namespace", fmt.Sprintf("team-%s", helper.Org1.Staff.UID), - fmt.Sprintf("user-%s", clientAdmin.Args.User.Identity.GetIdentifier()), + adminPrefsName, }, names) + obj, err := clientAdmin.Resource.Get(ctx, adminPrefsName, metav1.GetOptions{}) + require.NoError(t, err) + jj, err := json.MarshalIndent(obj.Object["spec"], "", " ") + require.NoError(t, err) + require.JSONEq(t, `{ + "weekStart":"saturday" + }`, string(jj)) + obj.Object["spec"] = map[string]any{ + "weekStart": "saturday", + "regionalFormat": "dd/mm/yyyy", + } + + // Set the regional format via k8s API + obj, err = clientAdmin.Resource.Update(ctx, obj, metav1.UpdateOptions{}) + require.NoError(t, err) + jj, err = json.MarshalIndent(obj.Object["spec"], "", " ") + require.NoError(t, err) + require.JSONEq(t, `{ + "weekStart": "saturday", + "regionalFormat": "dd/mm/yyyy" + }`, string(jj)) + // The viewer should only have namespace (eg org level) permissions rsp, err = clientViewer.Resource.List(ctx, metav1.ListOptions{}) require.NoError(t, err) @@ -118,14 +142,14 @@ func TestIntegrationPreferences(t *testing.T) { }, &shim{}) require.Equal(t, http.StatusOK, 
bootdata.Response.StatusCode, "get bootdata preferences") - jj, _ := json.Marshal(bootdata.Result.User) + jj, _ = json.Marshal(bootdata.Result.User) require.JSONEq(t, `{ "timezone":"africa", "weekStart":"saturday", "theme":"dark", "language":"en-US", `+ // FROM global default! - `"regionalFormat":"" - }`, string(jj)) + `"regionalFormat": ""}`, // why empty? + string(jj)) merged := apis.DoRequest(helper, apis.RequestParams{ User: clientAdmin.Args.User, @@ -133,9 +157,10 @@ func TestIntegrationPreferences(t *testing.T) { Path: "/apis/preferences.grafana.app/v1alpha1/namespaces/default/preferences/merged", }, &preferences.Preferences{}) require.Equal(t, http.StatusOK, merged.Response.StatusCode, "get merged preferences") - require.Equal(t, "saturday", *merged.Result.Spec.WeekStart) // from user - require.Equal(t, "africa", *merged.Result.Spec.Timezone) // from team - require.Equal(t, "dark", *merged.Result.Spec.Theme) // from org - require.Equal(t, "en-US", *merged.Result.Spec.Language) // settings.ini + require.Equal(t, "saturday", *merged.Result.Spec.WeekStart) // from user + require.Equal(t, "africa", *merged.Result.Spec.Timezone) // from team + require.Equal(t, "dark", *merged.Result.Spec.Theme) // from org + require.Equal(t, "en-US", *merged.Result.Spec.Language) // settings.ini + require.Equal(t, "dd/mm/yyyy", *merged.Result.Spec.RegionalFormat) // from user update }) } diff --git a/pkg/tests/apis/preferences/stars_test.go b/pkg/tests/apis/preferences/stars_test.go index 93106383cfa..3efc2469119 100644 --- a/pkg/tests/apis/preferences/stars_test.go +++ b/pkg/tests/apis/preferences/stars_test.go @@ -26,14 +26,19 @@ func TestIntegrationStars(t *testing.T) { for _, mode := range []grafanarest.DualWriterMode{ grafanarest.Mode0, - grafanarest.Mode2, // anything past 2 will fail + grafanarest.Mode2, + grafanarest.Mode3, + grafanarest.Mode5, } { + flags := []string{featuremgmt.FlagGrafanaAPIServerWithExperimentalAPIs} + if mode > grafanarest.Mode2 { + flags = 
append(flags, featuremgmt.FlagKubernetesStars) + } + helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{ - AppModeProduction: false, // required for experimental APIs - DisableAnonymous: true, - EnableFeatureToggles: []string{ - featuremgmt.FlagGrafanaAPIServerWithExperimentalAPIs, - }, + AppModeProduction: false, // required for experimental APIs + DisableAnonymous: true, + EnableFeatureToggles: flags, UnifiedStorageConfig: map[string]setting.UnifiedStorageConfig{ "dashboards.dashboard.grafana.app": { DualWriterMode: mode, diff --git a/pkg/tests/apis/provisioning/job_conflict_test.go b/pkg/tests/apis/provisioning/job_conflict_test.go new file mode 100644 index 00000000000..135743d4cf3 --- /dev/null +++ b/pkg/tests/apis/provisioning/job_conflict_test.go @@ -0,0 +1,83 @@ +package provisioning + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1" + "github.com/grafana/grafana/pkg/tests/testinfra" + "github.com/grafana/grafana/pkg/util/testutil" +) + +// TestIntegrationProvisioning_JobConflict tests that if two concurrent drivers try to update a +// job they received before the other updated it, that one will fail. 
This is critical for concurrent jobs +func TestIntegrationProvisioning_JobConflict(t *testing.T) { + testutil.SkipIntegrationTestInShortMode(t) + + // disable the controllers so the jobs don't get auto-processed + helper := runGrafana(t, func(opts *testinfra.GrafanaOpts) { + opts.DisableProvisioningControllers = true + }) + ctx := context.Background() + + // create a job + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "provisioning.grafana.app/v0alpha1", + "kind": "Job", + "metadata": map[string]interface{}{ + "name": "test-job-conflict", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "action": string(provisioning.JobActionPull), + "repository": "test-repo", + "pull": map[string]interface{}{ + "incremental": false, + }, + }, + }, + } + createdJob, err := helper.Jobs.Resource.Create(ctx, obj, metav1.CreateOptions{}) + require.NoError(t, err) + + // have two clients get the same job before either has updated it. this simulates the race condition + // between two concurrent workers. 
+ job, err := helper.Jobs.Resource.Get(ctx, createdJob.GetName(), metav1.GetOptions{}) + require.NoError(t, err) + job2, err := helper.Jobs.Resource.Get(ctx, createdJob.GetName(), metav1.GetOptions{}) + require.NoError(t, err) + + // have the first client update the job, this should update the RV + client1Update := job.DeepCopy() + if client1Update.GetLabels() == nil { + client1Update.SetLabels(make(map[string]string)) + } + labels := client1Update.GetLabels() + labels["provisioning.grafana.app/claim"] = "client1-claim" + client1Update.SetLabels(labels) + updatedJob, err := helper.Jobs.Resource.Update(ctx, client1Update, metav1.UpdateOptions{}) + require.NoError(t, err) + require.NotEqual(t, job.GetResourceVersion(), updatedJob.GetResourceVersion()) + + // now when client two tries to update the job, the RV is no longer what it originally received + client2Update := job2.DeepCopy() + if client2Update.GetLabels() == nil { + client2Update.SetLabels(make(map[string]string)) + } + labels2 := client2Update.GetLabels() + labels2["provisioning.grafana.app/claim"] = "client2-claim" + client2Update.SetLabels(labels2) + _, err = helper.Jobs.Resource.Update(ctx, client2Update, metav1.UpdateOptions{}) + require.Error(t, err) + require.True(t, apierrors.IsConflict(err), "should get conflict error when updating with stale resource version") + + // clean up + err = helper.Jobs.Resource.Delete(ctx, createdJob.GetName(), metav1.DeleteOptions{}) + require.NoError(t, err) +} diff --git a/pkg/tests/testinfra/testinfra.go b/pkg/tests/testinfra/testinfra.go index bac5f5208f7..b9f23fb4d44 100644 --- a/pkg/tests/testinfra/testinfra.go +++ b/pkg/tests/testinfra/testinfra.go @@ -129,7 +129,7 @@ func StartGrafanaEnv(t *testing.T, grafDir, cfgPath string) (string, *server.Tes var storage sql.UnifiedStorageGrpcService if runstore { storage, err = sql.ProvideUnifiedStorageGrpcService(env.Cfg, env.FeatureToggles, env.SQLStore, - env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, 
nil, kv.Config{}) + env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil) require.NoError(t, err) ctx := context.Background() err = storage.StartAsync(ctx) @@ -545,6 +545,13 @@ func CreateGrafDir(t *testing.T, opts GrafanaOpts) (string, string) { require.NoError(t, err) } + if opts.DisableProvisioningControllers { + provisioningSection, err := getOrCreateSection("provisioning") + require.NoError(t, err) + _, err = provisioningSection.NewKey("disable_controllers", "true") + require.NoError(t, err) + } + dashboardsSection, err := getOrCreateSection("dashboards") require.NoError(t, err) _, err = dashboardsSection.NewKey("min_refresh_interval", "10s") @@ -615,6 +622,7 @@ type GrafanaOpts struct { EnableRecordingRules bool EnableSCIM bool APIServerRuntimeConfig string + DisableProvisioningControllers bool // When "unified-grpc" is selected it will also start the grpc server APIServerStorageType options.StorageType diff --git a/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth.go new file mode 100644 index 00000000000..74bc6923eac --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth.go @@ -0,0 +1,117 @@ +package pgx + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "strings" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/log" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" +) + +func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + err := e.Ping(ctx) + if err != nil { + logCheckHealthError(ctx, e.dsInfo, err) + if strings.EqualFold(req.PluginContext.User.Role, "Admin") { + return ErrToHealthCheckResult(err) + } + errResponse := &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: e.TransformQueryError(e.log, 
err).Error(), + } + return errResponse, nil + } + return &backend.CheckHealthResult{Status: backend.HealthStatusOk, Message: "Database Connection OK"}, nil +} + +// ErrToHealthCheckResult converts error into user friendly health check message +// This should be called with non nil error. If the err parameter is empty, we will send Internal Server Error +func ErrToHealthCheckResult(err error) (*backend.CheckHealthResult, error) { + if err == nil { + return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"}, nil + } + res := &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: err.Error()} + details := map[string]string{ + "verboseMessage": err.Error(), + "errorDetailsLink": "https://grafana.com/docs/grafana/latest/datasources/postgres", + } + var opErr *net.OpError + if errors.As(err, &opErr) { + res.Message = "Network error: Failed to connect to the server" + if opErr != nil && opErr.Err != nil { + errMessage := opErr.Err.Error() + if strings.HasSuffix(opErr.Err.Error(), "no such host") { + errMessage = "no such host" + } + if strings.HasSuffix(opErr.Err.Error(), "unknown port") { + errMessage = "unknown port" + } + if strings.HasSuffix(opErr.Err.Error(), "invalid port") { + errMessage = "invalid port" + } + if strings.HasSuffix(opErr.Err.Error(), "missing port in address") { + errMessage = "missing port in address" + } + if strings.HasSuffix(opErr.Err.Error(), "invalid syntax") { + errMessage = "invalid syntax found in the address" + } + res.Message += fmt.Sprintf(". 
Error message: %s", errMessage) + } + } + + if errors.Is(err, sqleng.ErrParsingPostgresURL) { + res.Message = fmt.Sprintf("Connection string error: %s", sqleng.ErrParsingPostgresURL.Error()) + if unwrappedErr := errors.Unwrap(err); unwrappedErr != nil { + details["verboseMessage"] = unwrappedErr.Error() + } + } + detailBytes, marshalErr := json.Marshal(details) + if marshalErr != nil { + return res, nil + } + res.JSONDetails = detailBytes + return res, nil +} + +func logCheckHealthError(ctx context.Context, dsInfo sqleng.DataSourceInfo, err error) { + logger := log.DefaultLogger.FromContext(ctx) + configSummary := map[string]any{ + "config_url_length": len(dsInfo.URL), + "config_user_length": len(dsInfo.User), + "config_database_length": len(dsInfo.Database), + "config_json_data_database_length": len(dsInfo.JsonData.Database), + "config_max_open_conns": dsInfo.JsonData.MaxOpenConns, + "config_max_idle_conns": dsInfo.JsonData.MaxIdleConns, + "config_conn_max_life_time": dsInfo.JsonData.ConnMaxLifetime, + "config_conn_timeout": dsInfo.JsonData.ConnectionTimeout, + "config_timescaledb": dsInfo.JsonData.Timescaledb, + "config_ssl_mode": dsInfo.JsonData.Mode, + "config_tls_configuration_method": dsInfo.JsonData.ConfigurationMethod, + "config_tls_skip_verify": dsInfo.JsonData.TlsSkipVerify, + "config_timezone": dsInfo.JsonData.Timezone, + "config_time_interval": dsInfo.JsonData.TimeInterval, + "config_enable_secure_proxy": dsInfo.JsonData.SecureDSProxy, + "config_allow_clear_text_passwords": dsInfo.JsonData.AllowCleartextPasswords, + "config_authentication_type": dsInfo.JsonData.AuthenticationType, + "config_ssl_root_cert_file_length": len(dsInfo.JsonData.RootCertFile), + "config_ssl_cert_file_length": len(dsInfo.JsonData.CertFile), + "config_ssl_key_file_length": len(dsInfo.JsonData.CertKeyFile), + "config_encrypt_length": len(dsInfo.JsonData.Encrypt), + "config_server_name_length": len(dsInfo.JsonData.Servername), + "config_password_length": 
len(dsInfo.DecryptedSecureJSONData["password"]), + "config_tls_ca_cert_length": len(dsInfo.DecryptedSecureJSONData["tlsCACert"]), + "config_tls_client_cert_length": len(dsInfo.DecryptedSecureJSONData["tlsClientCert"]), + "config_tls_client_key_length": len(dsInfo.DecryptedSecureJSONData["tlsClientKey"]), + } + configSummaryJSON, marshalError := json.Marshal(configSummary) + if marshalError != nil { + logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error") + return + } + logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error_detailed", "details", string(configSummaryJSON)) +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth_test.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth_test.go new file mode 100644 index 00000000000..16536368622 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/handler_checkhealth_test.go @@ -0,0 +1,61 @@ +package pgx + +import ( + "errors" + "fmt" + "net" + "testing" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrToHealthCheckResult(t *testing.T) { + tests := []struct { + name string + err error + want *backend.CheckHealthResult + }{ + { + name: "without error", + want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"}, + }, + { + name: "network error", + err: errors.Join(errors.New("foo"), &net.OpError{Op: "read", Net: "tcp", Err: errors.New("some op")}), + want: &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "Network error: Failed to connect to the server. 
Error message: some op", + JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"foo\nread tcp: some op"}`), + }, + }, + { + name: "regular error", + err: errors.New("internal server error"), + want: &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "internal server error", + JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"internal server error"}`), + }, + }, + { + name: "invalid port specifier error", + err: fmt.Errorf("%w %q: %w", sqleng.ErrParsingPostgresURL, `"foo.bar.co"`, errors.New(`strconv.Atoi: parsing "foo.bar.co": invalid syntax`)), + want: &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "Connection string error: error parsing postgres url", + JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"error parsing postgres url \"\\\"foo.bar.co\\\"\": strconv.Atoi: parsing \"foo.bar.co\": invalid syntax"}`), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ErrToHealthCheckResult(tt.err) + require.Nil(t, err) + assert.Equal(t, string(tt.want.JSONDetails), string(got.JSONDetails)) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_pgx.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine.go similarity index 65% rename from pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_pgx.go rename to pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine.go index 6a1ee58c26c..8f8af632f13 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_pgx.go +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine.go @@ -1,25 +1,109 @@ -package sqleng +package pgx import ( "context" "encoding/json" "errors" "fmt" + "net" "runtime/debug" + "strconv" "strings" "sync" "time" 
"github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/gtime" "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" "github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" ) -func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer, +// MetaKeyExecutedQueryString is the key where the executed query should get stored +const MetaKeyExecutedQueryString = "executedQueryString" + +// SQLMacroEngine interpolates macros into sql. It takes in the Query to have access to query context and +// timeRange to be able to generate queries that use from and to. +type SQLMacroEngine interface { + Interpolate(query *backend.DataQuery, timeRange backend.TimeRange, sql string) (string, error) +} + +// SqlQueryResultTransformer transforms a query result row to RowValues with proper types. +type SqlQueryResultTransformer interface { + // TransformQueryError transforms a query error. 
+ TransformQueryError(logger log.Logger, err error) error + GetConverterList() []sqlutil.StringConverter +} + +type JsonData struct { + MaxOpenConns int `json:"maxOpenConns"` + MaxIdleConns int `json:"maxIdleConns"` + ConnMaxLifetime int `json:"connMaxLifetime"` + ConnectionTimeout int `json:"connectionTimeout"` + Timescaledb bool `json:"timescaledb"` + Mode string `json:"sslmode"` + ConfigurationMethod string `json:"tlsConfigurationMethod"` + TlsSkipVerify bool `json:"tlsSkipVerify"` + RootCertFile string `json:"sslRootCertFile"` + CertFile string `json:"sslCertFile"` + CertKeyFile string `json:"sslKeyFile"` + Timezone string `json:"timezone"` + Encrypt string `json:"encrypt"` + Servername string `json:"servername"` + TimeInterval string `json:"timeInterval"` + Database string `json:"database"` + SecureDSProxy bool `json:"enableSecureSocksProxy"` + SecureDSProxyUsername string `json:"secureSocksProxyUsername"` + AllowCleartextPasswords bool `json:"allowCleartextPasswords"` + AuthenticationType string `json:"authenticationType"` +} + +type DataPluginConfiguration struct { + DSInfo sqleng.DataSourceInfo + TimeColumnNames []string + MetricColumnTypes []string + RowLimit int64 +} + +type DataSourceHandler struct { + macroEngine SQLMacroEngine + queryResultTransformer SqlQueryResultTransformer + timeColumnNames []string + metricColumnTypes []string + log log.Logger + dsInfo sqleng.DataSourceInfo + rowLimit int64 + userError string + pool *pgxpool.Pool +} + +type QueryJson struct { + RawSql string `json:"rawSql"` + Fill bool `json:"fill"` + FillInterval float64 `json:"fillInterval"` + FillMode string `json:"fillMode"` + FillValue float64 `json:"fillValue"` + Format string `json:"format"` +} + +func (e *DataSourceHandler) TransformQueryError(logger log.Logger, err error) error { + // OpError is the error type usually returned by functions in the net + // package. It describes the operation, network type, and address of + // an error. 
We log this error rather than return it to the client + // for security purposes. + var opErr *net.OpError + if errors.As(err, &opErr) { + return fmt.Errorf("failed to connect to server - %s", e.userError) + } + + return e.queryResultTransformer.TransformQueryError(logger, err) +} + +func NewQueryDataHandler(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer, macroEngine SQLMacroEngine, log log.Logger) (*DataSourceHandler, error) { queryDataHandler := DataSourceHandler{ queryResultTransformer: queryResultTransformer, @@ -43,7 +127,12 @@ func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, conf return &queryDataHandler, nil } -func (e *DataSourceHandler) DisposePGX() { +type DBDataResponse struct { + dataResponse backend.DataResponse + refID string +} + +func (e *DataSourceHandler) Dispose() { e.log.Debug("Disposing DB...") if e.pool != nil { @@ -53,11 +142,11 @@ func (e *DataSourceHandler) DisposePGX() { e.log.Debug("DB disposed") } -func (e *DataSourceHandler) PingPGX(ctx context.Context) error { +func (e *DataSourceHandler) Ping(ctx context.Context) error { return e.pool.Ping(ctx) } -func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { +func (e *DataSourceHandler) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { result := backend.NewQueryDataResponse() ch := make(chan DBDataResponse, len(req.Queries)) var wg sync.WaitGroup @@ -83,7 +172,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query } wg.Add(1) - go e.executeQueryPGX(ctx, query, &wg, ch, queryjson) + go e.executeQuery(ctx, query, &wg, ch, queryjson) } wg.Wait() @@ -101,7 +190,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query func (e *DataSourceHandler) handleQueryError(frameErr string, err error, query string, 
source backend.ErrorSource, ch chan DBDataResponse, queryResult DBDataResponse) { var emptyFrame data.Frame emptyFrame.SetMeta(&data.FrameMeta{ExecutedQueryString: query}) - if backend.IsDownstreamError(err) { + if isDownstreamError(err) { source = backend.ErrorSourceDownstream } queryResult.dataResponse.Error = fmt.Errorf("%s: %w", frameErr, err) @@ -127,6 +216,18 @@ func (e *DataSourceHandler) handlePanic(logger log.Logger, queryResult *DBDataRe } } +// Interpolate provides global macros/substitutions for all sql datasources. +var Interpolate = func(query backend.DataQuery, timeRange backend.TimeRange, timeInterval string, sql string) string { + interval := query.Interval + + sql = strings.ReplaceAll(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10)) + sql = strings.ReplaceAll(sql, "$__interval", gtime.FormatInterval(interval)) + sql = strings.ReplaceAll(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.From.UTC().Unix())) + sql = strings.ReplaceAll(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.To.UTC().Unix())) + + return sql +} + func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgconn.Result, error) { c, err := e.pool.Acquire(ctx) if err != nil { @@ -140,7 +241,7 @@ func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgc return mrr.ReadAll() } -func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup, +func (e *DataSourceHandler) executeQuery(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup, ch chan DBDataResponse, queryJSON QueryJson) { defer wg.Done() queryResult := DBDataResponse{ @@ -171,7 +272,7 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query return } - qm, err := e.newProcessCfgPGX(queryContext, query, results, interpolatedQuery) + qm, err := e.newProcessCfg(queryContext, query, results, interpolatedQuery) if err != nil { e.handleQueryError("failed to 
get configurations", err, interpolatedQuery, backend.ErrorSourceDownstream, ch, queryResult) return @@ -186,6 +287,47 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query e.processFrame(frame, qm, queryResult, ch, logger) } +// dataQueryFormat is the type of query. +type dataQueryFormat string + +const ( + // dataQueryFormatTable identifies a table query (default). + dataQueryFormatTable dataQueryFormat = "table" + // dataQueryFormatSeries identifies a time series query. + dataQueryFormatSeries dataQueryFormat = "time_series" +) + +type dataQueryModel struct { + InterpolatedQuery string // property not set until after Interpolate() + Format dataQueryFormat + TimeRange backend.TimeRange + FillMissing *data.FillMissing // property not set until after Interpolate() + Interval time.Duration + columnNames []string + columnTypes []string + timeIndex int + timeEndIndex int + metricIndex int + metricPrefix bool + queryContext context.Context +} + +func convertSQLTimeColumnsToEpochMS(frame *data.Frame, qm *dataQueryModel) error { + if qm.timeIndex != -1 { + if err := convertSQLTimeColumnToEpochMS(frame, qm.timeIndex); err != nil { + return fmt.Errorf("%v: %w", "failed to convert time column", err) + } + } + + if qm.timeEndIndex != -1 { + if err := convertSQLTimeColumnToEpochMS(frame, qm.timeEndIndex); err != nil { + return fmt.Errorf("%v: %w", "failed to convert timeend column", err) + } + } + + return nil +} + func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel, queryResult DBDataResponse, ch chan DBDataResponse, logger log.Logger) { if frame.Meta == nil { frame.Meta = &data.FrameMeta{} @@ -281,10 +423,10 @@ func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel, ch <- queryResult } -func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query backend.DataQuery, +func (e *DataSourceHandler) newProcessCfg(queryContext context.Context, query backend.DataQuery, results 
[]*pgconn.Result, interpolatedQuery string) (*dataQueryModel, error) { columnNames := []string{} - columnTypesPGX := []string{} + columnTypes := []string{} // The results will contain column information in the metadata for _, result := range results { @@ -296,26 +438,26 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query // Handle special cases for field types switch field.DataTypeOID { case pgtype.TimetzOID: - columnTypesPGX = append(columnTypesPGX, "timetz") + columnTypes = append(columnTypes, "timetz") case 790: - columnTypesPGX = append(columnTypesPGX, "money") + columnTypes = append(columnTypes, "money") default: - columnTypesPGX = append(columnTypesPGX, "unknown") + columnTypes = append(columnTypes, "unknown") } } else { - columnTypesPGX = append(columnTypesPGX, pqtype.Name) + columnTypes = append(columnTypes, pqtype.Name) } } } qm := &dataQueryModel{ - columnTypesPGX: columnTypesPGX, - columnNames: columnNames, - timeIndex: -1, - timeEndIndex: -1, - metricIndex: -1, - metricPrefix: false, - queryContext: queryContext, + columnTypes: columnTypes, + columnNames: columnNames, + timeIndex: -1, + timeEndIndex: -1, + metricIndex: -1, + metricPrefix: false, + queryContext: queryContext, } queryJSON := QueryJson{} @@ -370,7 +512,7 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query qm.metricIndex = i default: if qm.metricIndex == -1 { - columnType := qm.columnTypesPGX[i] + columnType := qm.columnTypes[i] for _, mct := range e.metricColumnTypes { if columnType == mct { qm.metricIndex = i @@ -550,6 +692,10 @@ func convertPostgresValue(rawValue []byte, fd pgconn.FieldDescription, m *pgtype if err != nil { return nil, err } + // Handle null JSON values + if d == nil { + return nil, nil + } j := json.RawMessage(*d) return &j, nil default: @@ -592,3 +738,99 @@ func getFieldTypesFromDescriptions(fieldDescriptions []pgconn.FieldDescription, } return fieldTypes, nil } + +// convertSQLTimeColumnToEpochMS converts 
column named time to unix timestamp in milliseconds +// to make native datetime types and epoch dates work in annotation and table queries. +func convertSQLTimeColumnToEpochMS(frame *data.Frame, timeIndex int) error { + if timeIndex < 0 || timeIndex >= len(frame.Fields) { + return fmt.Errorf("timeIndex %d is out of range", timeIndex) + } + + origin := frame.Fields[timeIndex] + valueType := origin.Type() + if valueType == data.FieldTypeTime || valueType == data.FieldTypeNullableTime { + return nil + } + + newField := data.NewFieldFromFieldType(data.FieldTypeNullableTime, 0) + newField.Name = origin.Name + newField.Labels = origin.Labels + + valueLength := origin.Len() + for i := 0; i < valueLength; i++ { + v, err := origin.NullableFloatAt(i) + if err != nil { + return fmt.Errorf("unable to convert data to a time field") + } + if v == nil { + newField.Append(nil) + } else { + timestamp := time.Unix(0, int64(epochPrecisionToMS(*v))*int64(time.Millisecond)) + newField.Append(×tamp) + } + } + frame.Fields[timeIndex] = newField + + return nil +} + +// convertSQLValueColumnToFloat converts timeseries value column to float. +func convertSQLValueColumnToFloat(frame *data.Frame, Index int) (*data.Frame, error) { + if Index < 0 || Index >= len(frame.Fields) { + return frame, fmt.Errorf("metricIndex %d is out of range", Index) + } + + origin := frame.Fields[Index] + valueType := origin.Type() + if valueType == data.FieldTypeFloat64 || valueType == data.FieldTypeNullableFloat64 { + return frame, nil + } + + newField := data.NewFieldFromFieldType(data.FieldTypeNullableFloat64, origin.Len()) + newField.Name = origin.Name + newField.Labels = origin.Labels + + for i := 0; i < origin.Len(); i++ { + v, err := origin.NullableFloatAt(i) + if err != nil { + return frame, err + } + newField.Set(i, v) + } + + frame.Fields[Index] = newField + + return frame, nil +} + +// epochPrecisionToMS converts epoch precision to millisecond, if needed. 
+// Only seconds to milliseconds supported right now +func epochPrecisionToMS(value float64) float64 { + s := strconv.FormatFloat(value, 'e', -1, 64) + if strings.HasSuffix(s, "e+09") { + return value * float64(1e3) + } + + if strings.HasSuffix(s, "e+18") { + return value / float64(time.Millisecond) + } + + return value +} + +func isDownstreamError(err error) bool { + if backend.IsDownstreamError(err) { + return true + } + resultProcessingDownstreamErrors := []error{ + data.ErrorInputFieldsWithoutRows, + data.ErrorSeriesUnsorted, + data.ErrorNullTimeValues, + } + for _, e := range resultProcessingDownstreamErrors { + if errors.Is(err, e) { + return true + } + } + return false +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine_test.go b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine_test.go new file mode 100644 index 00000000000..2f161badb7d --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/pgx/sql_engine_test.go @@ -0,0 +1,681 @@ +package pgx + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/data" + "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgtype" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/grafana-plugin-sdk-go/backend/log" +) + +func Pointer[T any](v T) *T { return &v } + +func TestSQLEngine(t *testing.T) { + dt := time.Date(2018, 3, 14, 21, 20, 6, int(527345*time.Microsecond), time.UTC) + + t.Run("Handle interpolating $__interval and $__interval_ms", func(t *testing.T) { + from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC) + to := from.Add(5 * time.Minute) + timeRange := backend.TimeRange{From: from, To: to} + + text := "$__interval $__timeGroupAlias(time,$__interval) $__interval_ms" + + t.Run("interpolate 10 minutes $__interval", func(t *testing.T) { + query := backend.DataQuery{JSON: 
[]byte("{}"), MaxDataPoints: 1500, Interval: time.Minute * 10} + sql := Interpolate(query, timeRange, "", text) + require.Equal(t, "10m $__timeGroupAlias(time,10m) 600000", sql) + }) + + t.Run("interpolate 4seconds $__interval", func(t *testing.T) { + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 4} + sql := Interpolate(query, timeRange, "", text) + require.Equal(t, "4s $__timeGroupAlias(time,4s) 4000", sql) + }) + + t.Run("interpolate 200 milliseconds $__interval", func(t *testing.T) { + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Millisecond * 200} + sql := Interpolate(query, timeRange, "", text) + require.Equal(t, "200ms $__timeGroupAlias(time,200ms) 200", sql) + }) + }) + + t.Run("Given a time range between 2018-04-12 00:00 and 2018-04-12 00:05", func(t *testing.T) { + from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC) + to := from.Add(5 * time.Minute) + timeRange := backend.TimeRange{From: from, To: to} + query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 60} + + t.Run("interpolate __unixEpochFrom function", func(t *testing.T) { + sql := Interpolate(query, timeRange, "", "select $__unixEpochFrom()") + require.Equal(t, fmt.Sprintf("select %d", from.Unix()), sql) + }) + + t.Run("interpolate __unixEpochTo function", func(t *testing.T) { + sql := Interpolate(query, timeRange, "", "select $__unixEpochTo()") + require.Equal(t, fmt.Sprintf("select %d", to.Unix()), sql) + }) + }) + + t.Run("Given row values with int64 as time columns", func(t *testing.T) { + tSeconds := dt.Unix() + tMilliseconds := dt.UnixNano() / 1e6 + tNanoSeconds := dt.UnixNano() + var nilPointer *int64 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []int64{ + tSeconds, + }), + data.NewField("time2", nil, []*int64{ + Pointer(tSeconds), + }), + data.NewField("time3", nil, []int64{ + tMilliseconds, + }), + data.NewField("time4", nil, []*int64{ + 
Pointer(tMilliseconds), + }), + data.NewField("time5", nil, []int64{ + tNanoSeconds, + }), + data.NewField("time6", nil, []*int64{ + Pointer(tNanoSeconds), + }), + data.NewField("time7", nil, []*int64{ + nilPointer, + }), + ) + + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[6].At(0)) + }) + + t.Run("Given row values with uint64 as time columns", func(t *testing.T) { + tSeconds := uint64(dt.Unix()) + tMilliseconds := uint64(dt.UnixNano() / 1e6) + tNanoSeconds := uint64(dt.UnixNano()) + var nilPointer *uint64 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []uint64{ + tSeconds, + }), + data.NewField("time2", nil, []*uint64{ + Pointer(tSeconds), + }), + data.NewField("time3", nil, []uint64{ + tMilliseconds, + }), + data.NewField("time4", nil, []*uint64{ + Pointer(tMilliseconds), + }), + data.NewField("time5", nil, []uint64{ + tNanoSeconds, + }), + data.NewField("time6", nil, []*uint64{ + Pointer(tNanoSeconds), + }), + data.NewField("time7", nil, []*uint64{ + nilPointer, + }), + ) + + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), 
(*originFrame.Fields[2].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[6].At(0)) + }) + + t.Run("Given row values with int32 as time columns", func(t *testing.T) { + tSeconds := int32(dt.Unix()) + var nilInt *int32 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []int32{ + tSeconds, + }), + data.NewField("time2", nil, []*int32{ + Pointer(tSeconds), + }), + data.NewField("time7", nil, []*int32{ + nilInt, + }), + ) + for i := 0; i < 3; i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[2].At(0)) + }) + + t.Run("Given row values with uint32 as time columns", func(t *testing.T) { + tSeconds := uint32(dt.Unix()) + var nilInt *uint32 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []uint32{ + tSeconds, + }), + data.NewField("time2", nil, []*uint32{ + Pointer(tSeconds), + }), + data.NewField("time7", nil, []*uint32{ + nilInt, + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[2].At(0)) + }) + + t.Run("Given row values with float64 as time columns", func(t *testing.T) { + tSeconds := float64(dt.UnixNano()) / float64(time.Second) + tMilliseconds := float64(dt.UnixNano()) / float64(time.Millisecond) + tNanoSeconds := float64(dt.UnixNano()) + var 
nilPointer *float64 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []float64{ + tSeconds, + }), + data.NewField("time2", nil, []*float64{ + Pointer(tSeconds), + }), + data.NewField("time3", nil, []float64{ + tMilliseconds, + }), + data.NewField("time4", nil, []*float64{ + Pointer(tMilliseconds), + }), + data.NewField("time5", nil, []float64{ + tNanoSeconds, + }), + data.NewField("time6", nil, []*float64{ + Pointer(tNanoSeconds), + }), + data.NewField("time7", nil, []*float64{ + nilPointer, + }), + ) + + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + + require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix()) + require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[6].At(0)) + }) + + t.Run("Given row values with float32 as time columns", func(t *testing.T) { + tSeconds := float32(dt.Unix()) + var nilInt *float32 + + originFrame := data.NewFrame("", + data.NewField("time1", nil, []float32{ + tSeconds, + }), + data.NewField("time2", nil, []*float32{ + Pointer(tSeconds), + }), + data.NewField("time7", nil, []*float32{ + nilInt, + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + err := convertSQLTimeColumnToEpochMS(originFrame, i) + require.NoError(t, err) + } + require.Equal(t, int64(tSeconds), (*originFrame.Fields[0].At(0).(*time.Time)).Unix()) + require.Equal(t, int64(tSeconds), (*originFrame.Fields[1].At(0).(*time.Time)).Unix()) + require.Nil(t, originFrame.Fields[2].At(0)) + }) + + t.Run("Given row with value columns, would be converted to 
float64", func(t *testing.T) { + originFrame := data.NewFrame("", + data.NewField("value1", nil, []int64{ + int64(1), + }), + data.NewField("value2", nil, []*int64{ + Pointer(int64(1)), + }), + data.NewField("value3", nil, []int32{ + int32(1), + }), + data.NewField("value4", nil, []*int32{ + Pointer(int32(1)), + }), + data.NewField("value5", nil, []int16{ + int16(1), + }), + data.NewField("value6", nil, []*int16{ + Pointer(int16(1)), + }), + data.NewField("value7", nil, []int8{ + int8(1), + }), + data.NewField("value8", nil, []*int8{ + Pointer(int8(1)), + }), + data.NewField("value9", nil, []float64{ + float64(1), + }), + data.NewField("value10", nil, []*float64{ + Pointer(1.0), + }), + data.NewField("value11", nil, []float32{ + float32(1), + }), + data.NewField("value12", nil, []*float32{ + Pointer(float32(1)), + }), + data.NewField("value13", nil, []uint64{ + uint64(1), + }), + data.NewField("value14", nil, []*uint64{ + Pointer(uint64(1)), + }), + data.NewField("value15", nil, []uint32{ + uint32(1), + }), + data.NewField("value16", nil, []*uint32{ + Pointer(uint32(1)), + }), + data.NewField("value17", nil, []uint16{ + uint16(1), + }), + data.NewField("value18", nil, []*uint16{ + Pointer(uint16(1)), + }), + data.NewField("value19", nil, []uint8{ + uint8(1), + }), + data.NewField("value20", nil, []*uint8{ + Pointer(uint8(1)), + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + _, err := convertSQLValueColumnToFloat(originFrame, i) + require.NoError(t, err) + if i == 8 { + require.Equal(t, float64(1), originFrame.Fields[i].At(0).(float64)) + } else { + require.NotNil(t, originFrame.Fields[i].At(0).(*float64)) + require.Equal(t, float64(1), *originFrame.Fields[i].At(0).(*float64)) + } + } + }) + + t.Run("Given row with nil value columns", func(t *testing.T) { + var int64NilPointer *int64 + var int32NilPointer *int32 + var int16NilPointer *int16 + var int8NilPointer *int8 + var float64NilPointer *float64 + var float32NilPointer *float32 + var uint64NilPointer 
*uint64 + var uint32NilPointer *uint32 + var uint16NilPointer *uint16 + var uint8NilPointer *uint8 + + originFrame := data.NewFrame("", + data.NewField("value1", nil, []*int64{ + int64NilPointer, + }), + data.NewField("value2", nil, []*int32{ + int32NilPointer, + }), + data.NewField("value3", nil, []*int16{ + int16NilPointer, + }), + data.NewField("value4", nil, []*int8{ + int8NilPointer, + }), + data.NewField("value5", nil, []*float64{ + float64NilPointer, + }), + data.NewField("value6", nil, []*float32{ + float32NilPointer, + }), + data.NewField("value7", nil, []*uint64{ + uint64NilPointer, + }), + data.NewField("value8", nil, []*uint32{ + uint32NilPointer, + }), + data.NewField("value9", nil, []*uint16{ + uint16NilPointer, + }), + data.NewField("value10", nil, []*uint8{ + uint8NilPointer, + }), + ) + for i := 0; i < len(originFrame.Fields); i++ { + t.Run("", func(t *testing.T) { + _, err := convertSQLValueColumnToFloat(originFrame, i) + require.NoError(t, err) + require.Nil(t, originFrame.Fields[i].At(0)) + }) + } + }) + + t.Run("Should not return raw connection errors", func(t *testing.T) { + err := net.OpError{Op: "Dial", Err: fmt.Errorf("inner-error")} + transformer := &testQueryResultTransformer{} + dp := DataSourceHandler{ + log: backend.NewLoggerWith("logger", "test"), + queryResultTransformer: transformer, + } + resultErr := dp.TransformQueryError(dp.log, &err) + assert.False(t, transformer.transformQueryErrorWasCalled) + errorText := resultErr.Error() + assert.NotEqual(t, err, resultErr) + assert.NotContains(t, errorText, "inner-error") + assert.Contains(t, errorText, "failed to connect to server") + }) + + t.Run("Should return non-connection errors unmodified", func(t *testing.T) { + err := fmt.Errorf("normal error") + transformer := &testQueryResultTransformer{} + dp := DataSourceHandler{ + log: backend.NewLoggerWith("logger", "test"), + queryResultTransformer: transformer, + } + resultErr := dp.TransformQueryError(dp.log, err) + assert.True(t, 
transformer.transformQueryErrorWasCalled) + assert.Equal(t, err, resultErr) + assert.ErrorIs(t, err, resultErr) + }) +} + +func TestConvertResultsToFrame(t *testing.T) { + // Import the pgx packages needed for testing + // These imports are included in the main file but need to be accessible for tests + t.Run("convertResultsToFrame with single result", func(t *testing.T) { + // Create mock field descriptions + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + {Name: "name", DataTypeOID: pgtype.TextOID}, + {Name: "value", DataTypeOID: pgtype.Float8OID}, + } + + // Create mock result data + mockRows := [][][]byte{ + {[]byte("1"), []byte("test1"), []byte("10.5")}, + {[]byte("2"), []byte("test2"), []byte("20.7")}, + } + + // Create mock result + result := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + result.CommandTag = pgconn.NewCommandTag("SELECT 2") + + results := []*pgconn.Result{result} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 3, len(frame.Fields)) + require.Equal(t, 2, frame.Rows()) + + // Verify field names + require.Equal(t, "id", frame.Fields[0].Name) + require.Equal(t, "name", frame.Fields[1].Name) + require.Equal(t, "value", frame.Fields[2].Name) + }) + + t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) { + // Create mock field descriptions (same structure for both results) + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + {Name: "name", DataTypeOID: pgtype.TextOID}, + } + + // Create first result + mockRows1 := [][][]byte{ + {[]byte("1"), []byte("test1")}, + {[]byte("2"), []byte("test2")}, + } + result1 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows1, + } + result1.CommandTag = pgconn.NewCommandTag("SELECT 2") + + // Create second result with same structure + mockRows2 := [][][]byte{ + {[]byte("3"), 
[]byte("test3")}, + {[]byte("4"), []byte("test4")}, + } + result2 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows2, + } + result2.CommandTag = pgconn.NewCommandTag("SELECT 2") + + results := []*pgconn.Result{result1, result2} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 2, len(frame.Fields)) + require.Equal(t, 4, frame.Rows()) // Should have rows from both results + + // Verify field names + require.Equal(t, "id", frame.Fields[0].Name) + require.Equal(t, "name", frame.Fields[1].Name) + }) + + t.Run("convertResultsToFrame with row limit", func(t *testing.T) { + // Create mock field descriptions + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + } + + // Create mock result data with 3 rows + mockRows := [][][]byte{ + {[]byte("1")}, + {[]byte("2")}, + {[]byte("3")}, + } + + result := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + result.CommandTag = pgconn.NewCommandTag("SELECT 3") + + results := []*pgconn.Result{result} + + // Set row limit to 2 + frame, err := convertResultsToFrame(results, 2) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 1, len(frame.Fields)) + require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows + + // Should have a notice about the limit + require.NotNil(t, frame.Meta) + require.Len(t, frame.Meta.Notices, 1) + require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") + }) + + t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) { + // Create a non-SELECT result (should be skipped) + nonSelectResult := &pgconn.Result{} + nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1") + + // Create a SELECT result + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + } + mockRows := [][][]byte{ + {[]byte("1")}, + } + selectResult := &pgconn.Result{ + 
FieldDescriptions: fieldDescs, + Rows: mockRows, + } + selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1") + + results := []*pgconn.Result{nonSelectResult, selectResult} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 1, len(frame.Fields)) + require.Equal(t, 1, frame.Rows()) + }) + + t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) { + // Create only non-SELECT results + result1 := &pgconn.Result{} + result1.CommandTag = pgconn.NewCommandTag("UPDATE 1") + + result2 := &pgconn.Result{} + result2.CommandTag = pgconn.NewCommandTag("INSERT 1") + + results := []*pgconn.Result{result1, result2} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 0, len(frame.Fields)) + require.Equal(t, 0, frame.Rows()) + }) + + t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) { + // Create mock field descriptions (same structure for both results) + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + } + + // Create first result with 3 rows + mockRows1 := [][][]byte{ + {[]byte("1")}, + {[]byte("2")}, + {[]byte("3")}, + } + result1 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows1, + } + result1.CommandTag = pgconn.NewCommandTag("SELECT 3") + + // Create second result with 3 rows + mockRows2 := [][][]byte{ + {[]byte("4")}, + {[]byte("5")}, + {[]byte("6")}, + } + result2 := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows2, + } + result2.CommandTag = pgconn.NewCommandTag("SELECT 3") + + results := []*pgconn.Result{result1, result2} + + // Set row limit to 2 (should limit each result to 2 rows) + frame, err := convertResultsToFrame(results, 2) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 1, len(frame.Fields)) + require.Equal(t, 4, frame.Rows()) // 2 rows from each 
result + + // Should have notices about the limit from both results + require.NotNil(t, frame.Meta) + require.Len(t, frame.Meta.Notices, 2) + require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") + require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2") + }) + + t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) { + // Create mock field descriptions + fieldDescs := []pgconn.FieldDescription{ + {Name: "id", DataTypeOID: pgtype.Int4OID}, + {Name: "name", DataTypeOID: pgtype.TextOID}, + } + + // Create mock result data with null values + mockRows := [][][]byte{ + {[]byte("1"), nil}, // null name + {nil, []byte("test2")}, // null id + } + + result := &pgconn.Result{ + FieldDescriptions: fieldDescs, + Rows: mockRows, + } + result.CommandTag = pgconn.NewCommandTag("SELECT 2") + + results := []*pgconn.Result{result} + + frame, err := convertResultsToFrame(results, 1000) + require.NoError(t, err) + require.NotNil(t, frame) + require.Equal(t, 2, len(frame.Fields)) + require.Equal(t, 2, frame.Rows()) + + // Check that null values are handled correctly + // The exact representation depends on the field type, but should not panic + require.NotPanics(t, func() { + frame.Fields[0].At(1) // null id + frame.Fields[1].At(0) // null name + }) + }) +} + +type testQueryResultTransformer struct { + transformQueryErrorWasCalled bool +} + +func (t *testQueryResultTransformer) TransformQueryError(_ log.Logger, err error) error { + t.transformQueryErrorWasCalled = true + return err +} + +func (t *testQueryResultTransformer) GetConverterList() []sqlutil.StringConverter { + return nil +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres.go b/pkg/tsdb/grafana-postgresql-datasource/postgres.go index a889ef662a7..ca4b4eb5565 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres.go @@ -16,56 +16,14 @@ import ( 
"github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" "github.com/grafana/grafana/pkg/services/featuremgmt" - "github.com/grafana/grafana/pkg/setting" "github.com/jackc/pgx/v5/pgxpool" "github.com/lib/pq" "github.com/grafana/grafana-plugin-sdk-go/backend/log" + sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx" "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" ) -func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service { - logger := backend.NewLoggerWith("logger", "tsdb.postgres") - s := &Service{ - tlsManager: newTLSManager(logger, cfg.DataPath), - pgxTlsManager: newPgxTlsManager(logger), - logger: logger, - features: features, - } - s.im = datasource.NewInstanceManager(s.newInstanceSettings()) - return s -} - -type Service struct { - tlsManager tlsSettingsProvider - pgxTlsManager *pgxTlsManager - im instancemgmt.InstanceManager - logger log.Logger - features featuremgmt.FeatureToggles -} - -func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) { - i, err := s.im.Get(ctx, pluginCtx) - if err != nil { - return nil, err - } - instance := i.(*sqleng.DataSourceHandler) - return instance, nil -} - -func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { - dsInfo, err := s.getDSInfo(ctx, req.PluginContext) - if err != nil { - return nil, err - } - - if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { - return dsInfo.QueryDataPGX(ctx, req) - } - - return dsInfo.QueryData(ctx, req) -} - func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*sql.DB, *sqleng.DataSourceHandler, error) { connector, err := pq.NewConnector(cnnstr) if err != nil { @@ -115,7 +73,7 @@ func 
newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit in return db, handler, nil } -func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqleng.DataSourceHandler, error) { +func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqlengpgx.DataSourceHandler, error) { pgxConf, err := pgxpool.ParseConfig(cnnstr) if err != nil { logger.Error("postgres config creation failed", "error", err) @@ -144,7 +102,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit return []string{host}, nil } - config := sqleng.DataPluginConfiguration{ + config := sqlengpgx.DataPluginConfiguration{ DSInfo: dsInfo, MetricColumnTypes: []string{"unknown", "text", "varchar", "char", "bpchar"}, RowLimit: rowLimit, @@ -160,7 +118,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit return nil, nil, err } - handler, err := sqleng.NewQueryDataHandlerPGX(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb), + handler, err := sqlengpgx.NewQueryDataHandler(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb), logger) if err != nil { logger.Error("Failed connecting to Postgres", "err", err) @@ -171,8 +129,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit return p, handler, nil } -func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc { - logger := s.logger +func NewInstanceSettings(logger log.Logger, features featuremgmt.FeatureToggles, dataPath string) datasource.InstanceFactoryFunc { return func(ctx context.Context, settings 
backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { cfg := backend.GrafanaConfigFromContext(ctx) sqlCfg, err := cfg.SQL() @@ -210,49 +167,53 @@ func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc { DecryptedSecureJSONData: settings.DecryptedSecureJSONData, } - isPGX := s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) + isPGX := features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) userFacingDefaultError, err := cfg.UserFacingDefaultError() if err != nil { return nil, err } - var handler instancemgmt.Instance if isPGX { - pgxTlsSettings, err := s.pgxTlsManager.getTLSSettings(dsInfo) + pgxlogger := logger.FromContext(ctx).With("driver", "pgx") + pgxTlsManager := newPgxTlsManager(pgxlogger) + pgxTlsSettings, err := pgxTlsManager.getTLSSettings(dsInfo) if err != nil { return "", err } // Ensure cleanupCertFiles is called after the connection is opened - defer s.pgxTlsManager.cleanupCertFiles(pgxTlsSettings) - cnnstr, err := s.generateConnectionString(dsInfo, pgxTlsSettings, isPGX) + defer pgxTlsManager.cleanupCertFiles(pgxTlsSettings) + cnnstr, err := generateConnectionString(dsInfo, pgxTlsSettings, isPGX, pgxlogger) if err != nil { return "", err } - _, handler, err = newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings) + _, handler, err := newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pgxlogger, settings) if err != nil { - logger.Error("Failed connecting to Postgres", "err", err) + pgxlogger.Error("Failed connecting to Postgres", "err", err) return nil, err } + pgxlogger.Debug("Successfully connected to Postgres") + return handler, nil } else { - tlsSettings, err := s.tlsManager.getTLSSettings(dsInfo) + pqlogger := logger.FromContext(ctx).With("driver", "libpq") + tlsManager := newTLSManager(pqlogger, dataPath) + tlsSettings, err := tlsManager.getTLSSettings(dsInfo) if err != nil { return "", err } - cnnstr, err := 
s.generateConnectionString(dsInfo, tlsSettings, isPGX) + cnnstr, err := generateConnectionString(dsInfo, tlsSettings, isPGX, pqlogger) if err != nil { return nil, err } - _, handler, err = newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings) + _, handler, err := newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pqlogger, settings) if err != nil { - logger.Error("Failed connecting to Postgres", "err", err) + pqlogger.Error("Failed connecting to Postgres", "err", err) return nil, err } + pqlogger.Debug("Successfully connected to Postgres") + return handler, nil } - - logger.Debug("Successfully connected to Postgres") - return handler, nil } } @@ -342,9 +303,7 @@ func buildBaseConnectionString(params connectionParams) string { return connStr } -func (s *Service) generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool) (string, error) { - logger := s.logger - +func generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool, logger log.Logger) (string, error) { params, err := parseConnectionParams(dsInfo, logger) if err != nil { return "", err @@ -387,15 +346,6 @@ func (t *postgresQueryResultTransformer) TransformQueryError(_ log.Logger, err e return err } -// CheckHealth pings the connected SQL database -func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { - dsHandler, err := s.getDSInfo(ctx, req.PluginContext) - if err != nil { - return sqleng.ErrToHealthCheckResult(err) - } - return dsHandler.CheckHealth(ctx, req, s.features) -} - func (t *postgresQueryResultTransformer) GetConverterList() []sqlutil.StringConverter { return []sqlutil.StringConverter{ { diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go index d92a1c78958..f65267dbb2f 100644 --- 
a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_snapshot_test.go @@ -127,6 +127,7 @@ func TestIntegrationPostgresPGXSnapshots(t *testing.T) { {format: "table", name: "types_datetime_pgx"}, {format: "table", name: "types_other"}, {format: "table", name: "types_enum"}, + {format: "table", name: "types_jsonb"}, {format: "table", name: "timestamp_convert_bigint"}, {format: "table", name: "timestamp_convert_integer"}, {format: "table", name: "timestamp_convert_real"}, @@ -185,7 +186,7 @@ func TestIntegrationPostgresPGXSnapshots(t *testing.T) { query := makeQuery(rawSQL, test.format) - result, err := handler.QueryDataPGX(context.Background(), &query) + result, err := handler.QueryData(context.Background(), &query) require.Len(t, result.Responses, 1) response, found := result.Responses["A"] require.True(t, found) diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go index 6e14282d824..7c66bde7580 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_pgx_test.go @@ -151,10 +151,6 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) { } for _, tt := range testCases { t.Run(tt.desc, func(t *testing.T) { - svc := Service{ - logger: backend.NewLoggerWith("logger", "tsdb.postgres"), - } - ds := sqleng.DataSourceInfo{ URL: tt.host, User: tt.user, @@ -162,8 +158,9 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) { Database: tt.database, UID: tt.uid, } + logger := backend.NewLoggerWith("logger", "tsdb.postgres") - connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false) + connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger) if tt.expErr == "" { require.NoError(t, err, tt.desc) @@ -284,7 +281,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, }, } - resp, err := 
exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -383,7 +380,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -426,7 +423,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -460,7 +457,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] frames := queryResult.Frames @@ -488,7 +485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -542,7 +539,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -589,7 +586,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -624,7 +621,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, 
err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -741,7 +738,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -765,7 +762,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -789,7 +786,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -813,7 +810,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -837,7 +834,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -861,7 +858,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -885,7 +882,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -910,7 +907,7 @@ func 
TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -934,7 +931,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -959,7 +956,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -991,7 +988,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1026,7 +1023,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1086,7 +1083,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["Deploys"] @@ -1113,7 +1110,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["Tickets"] @@ -1136,7 +1133,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) 
require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1161,7 +1158,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1186,7 +1183,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1212,7 +1209,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1238,7 +1235,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1264,7 +1261,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1290,7 +1287,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1338,7 +1335,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := handler.QueryDataPGX(t.Context(), query) + resp, err := handler.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, 
queryResult.Error) @@ -1368,7 +1365,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := handler.QueryDataPGX(t.Context(), query) + resp, err := handler.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1406,7 +1403,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { }, } - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] @@ -1453,7 +1450,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic and should work correctly - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) @@ -1488,7 +1485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic anymore, but should return an error instead - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] @@ -1517,7 +1514,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic anymore, but should return an error instead - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] @@ -1546,7 +1543,7 @@ func TestIntegrationPostgresPGX(t *testing.T) { } // This should not panic - resp, err := exe.QueryDataPGX(t.Context(), query) + resp, err := exe.QueryData(t.Context(), query) require.NoError(t, err) queryResult := resp.Responses["A"] require.NoError(t, queryResult.Error) diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_service.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_service.go new file mode 100644 index 00000000000..5a1050ba3a5 --- /dev/null +++ 
b/pkg/tsdb/grafana-postgresql-datasource/postgres_service.go @@ -0,0 +1,80 @@ +package postgres + +import ( + "context" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" + "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" + + "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/setting" + sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx" + "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng" +) + +type Service struct { + im instancemgmt.InstanceManager + features featuremgmt.FeatureToggles +} + +func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service { + logger := backend.NewLoggerWith("logger", "tsdb.postgres") + s := &Service{ + im: datasource.NewInstanceManager(NewInstanceSettings(logger, features, cfg.DataPath)), + features: features, + } + return s +} + +// NOTE: do not put any business logic into this method. it's whole job is to forward the call "inside" +func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { + dsHandler, err := s.getDSInfoPGX(ctx, req.PluginContext) + if err != nil { + return sqlengpgx.ErrToHealthCheckResult(err) + } + return dsHandler.CheckHealth(ctx, req) + } else { + dsHandler, err := s.getDSInfo(ctx, req.PluginContext) + if err != nil { + return sqleng.ErrToHealthCheckResult(err) + } + return dsHandler.CheckHealth(ctx, req) + } +} + +// NOTE: do not put any business logic into this method. 
it's whole job is to forward the call "inside" +func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { + if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { + dsInfo, err := s.getDSInfoPGX(ctx, req.PluginContext) + if err != nil { + return nil, err + } + return dsInfo.QueryData(ctx, req) + } else { + dsInfo, err := s.getDSInfo(ctx, req.PluginContext) + if err != nil { + return nil, err + } + return dsInfo.QueryData(ctx, req) + } +} + +func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) { + i, err := s.im.Get(ctx, pluginCtx) + if err != nil { + return nil, err + } + instance := i.(*sqleng.DataSourceHandler) + return instance, nil +} + +func (s *Service) getDSInfoPGX(ctx context.Context, pluginCtx backend.PluginContext) (*sqlengpgx.DataSourceHandler, error) { + i, err := s.im.Get(ctx, pluginCtx) + if err != nil { + return nil, err + } + instance := i.(*sqlengpgx.DataSourceHandler) + return instance, nil +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go b/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go index b71bd23b387..7d6e99fcb92 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/postgres_test.go @@ -156,10 +156,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) { } for _, tt := range testCases { t.Run(tt.desc, func(t *testing.T) { - svc := Service{ - tlsManager: &tlsTestManager{settings: tt.tlsSettings}, - logger: backend.NewLoggerWith("logger", "tsdb.postgres"), - } + logger := backend.NewLoggerWith("logger", "tsdb.postgres") ds := sqleng.DataSourceInfo{ URL: tt.host, @@ -169,7 +166,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) { UID: tt.uid, } - connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false) + connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger) if 
tt.expErr == "" { require.NoError(t, err, tt.desc) @@ -1409,14 +1406,6 @@ func genTimeRangeByInterval(from time.Time, duration time.Duration, interval tim return timeRange } -type tlsTestManager struct { - settings tlsSettings -} - -func (m *tlsTestManager) getTLSSettings(dsInfo sqleng.DataSourceInfo) (tlsSettings, error) { - return m.settings, nil -} - func isTestDbPostgres() bool { if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present { return db == "postgres" diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go b/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go index 99d36aaaa85..224d00eace5 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go +++ b/pkg/tsdb/grafana-postgresql-datasource/sqleng/handler_checkhealth.go @@ -10,17 +10,11 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/log" - "github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/lib/pq" ) -func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest, features featuremgmt.FeatureToggles) (*backend.CheckHealthResult, error) { - var err error - if features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) { - err = e.PingPGX(ctx) - } else { - err = e.Ping() - } +func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + err := e.Ping() if err != nil { logCheckHealthError(ctx, e.dsInfo, err) if strings.EqualFold(req.PluginContext.User.Role, "Admin") { diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go index ca3a9dff36c..fdaac59c6f1 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go +++ b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine.go @@ -19,7 +19,6 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend/log" 
"github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" - "github.com/jackc/pgx/v5/pgxpool" ) // MetaKeyExecutedQueryString is the key where the executed query should get stored @@ -89,7 +88,6 @@ type DataSourceHandler struct { dsInfo DataSourceInfo rowLimit int64 userError string - pool *pgxpool.Pool } type QueryJson struct { @@ -490,7 +488,6 @@ type dataQueryModel struct { Interval time.Duration columnNames []string columnTypes []*sql.ColumnType - columnTypesPGX []string timeIndex int timeEndIndex int metricIndex int diff --git a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go index f0f5f5b7a9b..4d511a53f25 100644 --- a/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go +++ b/pkg/tsdb/grafana-postgresql-datasource/sqleng/sql_engine_test.go @@ -9,8 +9,6 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" - "github.com/jackc/pgx/v5/pgconn" - "github.com/jackc/pgx/v5/pgtype" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -427,246 +425,6 @@ func TestSQLEngine(t *testing.T) { }) } -func TestConvertResultsToFrame(t *testing.T) { - // Import the pgx packages needed for testing - // These imports are included in the main file but need to be accessible for tests - t.Run("convertResultsToFrame with single result", func(t *testing.T) { - // Create mock field descriptions - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - {Name: "name", DataTypeOID: pgtype.TextOID}, - {Name: "value", DataTypeOID: pgtype.Float8OID}, - } - - // Create mock result data - mockRows := [][][]byte{ - {[]byte("1"), []byte("test1"), []byte("10.5")}, - {[]byte("2"), []byte("test2"), []byte("20.7")}, - } - - // Create mock result - result := &pgconn.Result{ - FieldDescriptions: 
fieldDescs, - Rows: mockRows, - } - result.CommandTag = pgconn.NewCommandTag("SELECT 2") - - results := []*pgconn.Result{result} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 3, len(frame.Fields)) - require.Equal(t, 2, frame.Rows()) - - // Verify field names - require.Equal(t, "id", frame.Fields[0].Name) - require.Equal(t, "name", frame.Fields[1].Name) - require.Equal(t, "value", frame.Fields[2].Name) - }) - - t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) { - // Create mock field descriptions (same structure for both results) - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - {Name: "name", DataTypeOID: pgtype.TextOID}, - } - - // Create first result - mockRows1 := [][][]byte{ - {[]byte("1"), []byte("test1")}, - {[]byte("2"), []byte("test2")}, - } - result1 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows1, - } - result1.CommandTag = pgconn.NewCommandTag("SELECT 2") - - // Create second result with same structure - mockRows2 := [][][]byte{ - {[]byte("3"), []byte("test3")}, - {[]byte("4"), []byte("test4")}, - } - result2 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows2, - } - result2.CommandTag = pgconn.NewCommandTag("SELECT 2") - - results := []*pgconn.Result{result1, result2} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 2, len(frame.Fields)) - require.Equal(t, 4, frame.Rows()) // Should have rows from both results - - // Verify field names - require.Equal(t, "id", frame.Fields[0].Name) - require.Equal(t, "name", frame.Fields[1].Name) - }) - - t.Run("convertResultsToFrame with row limit", func(t *testing.T) { - // Create mock field descriptions - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - } - - // Create mock result data with 3 rows - mockRows := 
[][][]byte{ - {[]byte("1")}, - {[]byte("2")}, - {[]byte("3")}, - } - - result := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - result.CommandTag = pgconn.NewCommandTag("SELECT 3") - - results := []*pgconn.Result{result} - - // Set row limit to 2 - frame, err := convertResultsToFrame(results, 2) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 1, len(frame.Fields)) - require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows - - // Should have a notice about the limit - require.NotNil(t, frame.Meta) - require.Len(t, frame.Meta.Notices, 1) - require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") - }) - - t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) { - // Create a non-SELECT result (should be skipped) - nonSelectResult := &pgconn.Result{} - nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1") - - // Create a SELECT result - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - } - mockRows := [][][]byte{ - {[]byte("1")}, - } - selectResult := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1") - - results := []*pgconn.Result{nonSelectResult, selectResult} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 1, len(frame.Fields)) - require.Equal(t, 1, frame.Rows()) - }) - - t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) { - // Create only non-SELECT results - result1 := &pgconn.Result{} - result1.CommandTag = pgconn.NewCommandTag("UPDATE 1") - - result2 := &pgconn.Result{} - result2.CommandTag = pgconn.NewCommandTag("INSERT 1") - - results := []*pgconn.Result{result1, result2} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 0, len(frame.Fields)) 
- require.Equal(t, 0, frame.Rows()) - }) - - t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) { - // Create mock field descriptions (same structure for both results) - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - } - - // Create first result with 3 rows - mockRows1 := [][][]byte{ - {[]byte("1")}, - {[]byte("2")}, - {[]byte("3")}, - } - result1 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows1, - } - result1.CommandTag = pgconn.NewCommandTag("SELECT 3") - - // Create second result with 3 rows - mockRows2 := [][][]byte{ - {[]byte("4")}, - {[]byte("5")}, - {[]byte("6")}, - } - result2 := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows2, - } - result2.CommandTag = pgconn.NewCommandTag("SELECT 3") - - results := []*pgconn.Result{result1, result2} - - // Set row limit to 2 (should limit each result to 2 rows) - frame, err := convertResultsToFrame(results, 2) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 1, len(frame.Fields)) - require.Equal(t, 4, frame.Rows()) // 2 rows from each result - - // Should have notices about the limit from both results - require.NotNil(t, frame.Meta) - require.Len(t, frame.Meta.Notices, 2) - require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2") - require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2") - }) - - t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) { - // Create mock field descriptions - fieldDescs := []pgconn.FieldDescription{ - {Name: "id", DataTypeOID: pgtype.Int4OID}, - {Name: "name", DataTypeOID: pgtype.TextOID}, - } - - // Create mock result data with null values - mockRows := [][][]byte{ - {[]byte("1"), nil}, // null name - {nil, []byte("test2")}, // null id - } - - result := &pgconn.Result{ - FieldDescriptions: fieldDescs, - Rows: mockRows, - } - result.CommandTag = 
pgconn.NewCommandTag("SELECT 2") - - results := []*pgconn.Result{result} - - frame, err := convertResultsToFrame(results, 1000) - require.NoError(t, err) - require.NotNil(t, frame) - require.Equal(t, 2, len(frame.Fields)) - require.Equal(t, 2, frame.Rows()) - - // Check that null values are handled correctly - // The exact representation depends on the field type, but should not panic - require.NotPanics(t, func() { - frame.Fields[0].At(1) // null id - frame.Fields[1].At(0) // null name - }) - }) -} - type testQueryResultTransformer struct { transformQueryErrorWasCalled bool } diff --git a/pkg/tsdb/grafana-postgresql-datasource/standalone/main.go b/pkg/tsdb/grafana-postgresql-datasource/standalone/main.go new file mode 100644 index 00000000000..c13adbb7825 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/standalone/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "os" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" + "github.com/grafana/grafana-plugin-sdk-go/backend/log" + "github.com/grafana/grafana/pkg/services/featuremgmt" + "github.com/grafana/grafana/pkg/setting" + postgres "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource" +) + +func main() { + // No need to pass logger name, it will be set by the plugin SDK + logger := backend.NewLoggerWith() + // TODO: get rid of setting.NewCfg() and featuremgmt.FeatureToggles once PostgresDSUsePGX is removed + cfg := setting.NewCfg() + // We want to enable the feature toggle for api server + features := featuremgmt.WithFeatures(featuremgmt.FlagPostgresDSUsePGX) + if err := datasource.Manage("grafana-postgresql-datasource", postgres.NewInstanceSettings(logger, features, cfg.DataPath), datasource.ManageOpts{}); err != nil { + log.DefaultLogger.Error(err.Error()) + os.Exit(1) + } +} diff --git a/pkg/tsdb/grafana-postgresql-datasource/testdata/table/types_jsonb.golden.jsonc 
b/pkg/tsdb/grafana-postgresql-datasource/testdata/table/types_jsonb.golden.jsonc new file mode 100644 index 00000000000..54b44837d22 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/testdata/table/types_jsonb.golden.jsonc @@ -0,0 +1,277 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "typeVersion": [ +// 0, +// 0 +// ], +// "executedQueryString": "SELECT * FROM tbl" +// } +// Name: +// Dimensions: 9 Fields by 4 Rows +// +----------------+---------------------------------------------------------------+-------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------+-----------------------------------------------------------------------------+--------------------------+--------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------+ +// | Name: id | Name: simple_json | Name: simple_json_nn | Name: complex_json | Name: array_json | Name: nested_json | Name: empty_json | Name: null_json | Name: network_response | +// | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | Labels: | +// | Type: []*int32 | Type: []*json.RawMessage | Type: []*json.RawMessage | Type: []*json.RawMessage | Type: []*json.RawMessage | Type: []*json.RawMessage | Type: []*json.RawMessage | Type: []*json.RawMessage | Type: []*json.RawMessage | +// 
+----------------+---------------------------------------------------------------+-------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------+-----------------------------------------------------------------------------+--------------------------+--------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------+ +// | 1 | {"key": "value"} | {"status": "active"} | {"user": {"id": 123, "name": "John Doe", "email": "john@example.com"}, "metadata": {"tags": ["user", "active"], "created": "2023-01-01"}} | [1, 2, 3, "test"] | {"level1": {"level2": {"level3": {"data": "deep"}}}} | {} | null | {"code": "network_error", "message": "The restore request was not successful The request failed because the MSISDN does not exist in the PPS."} | +// | 2 | {"number": 42, "boolean": true} | {"status": "inactive"} | {"product": {"id": "abc-123", "price": 99.99, "available": true}, "inventory": {"count": 5, "warehouse": "US-WEST"}} | [] | {"config": {"database": {"ssl": false, "host": "localhost", "port": 5432}}} | [] | null | {"code": "network_error", "message": "The imei for the device 007abee2-2105-459b-b4a9-d218c8180a84 could not be found"} | +// | 3 | null | {"status": "pending"} | null | [{"id": 1, "name": "item1"}, {"id": 2, "name": "item2"}] | null | {"empty": {}} | null | [] | +// | 4 | {"special_chars": "test with spaces and symbols: !@#$%^&*()"} | {"unicode": "测试 🚀 émojis"} | {"error": {"code": 500, "details": {"timestamp": "2023-12-24T14:30:00Z", "stack_trace": "Error at line 42"}, "message": "Internal server error"}} | [null, true, false, 0, "", {}] | {"a": {"b": {"c": {"d": {"e": "very deep nesting"}}}}} | null | null | {"code": "network_error", "message": "The imei for the device 
cdfd9055-3b61-46f3-945a-dfd0f6aa4f27 could not be found"} | +// +----------------+---------------------------------------------------------------+-------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------------------------------------+-----------------------------------------------------------------------------+--------------------------+--------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "meta": { + "typeVersion": [ + 0, + 0 + ], + "executedQueryString": "SELECT * FROM tbl" + }, + "fields": [ + { + "name": "id", + "type": "number", + "typeInfo": { + "frame": "int32", + "nullable": true + } + }, + { + "name": "simple_json", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "simple_json_nn", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "complex_json", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "array_json", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "nested_json", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "empty_json", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "null_json", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + }, + { + "name": "network_response", + "type": "other", + "typeInfo": { + "frame": "json.RawMessage", + "nullable": true + } + } + ] + }, + "data": { + "values": 
[ + [ + 1, + 2, + 3, + 4 + ], + [ + { + "key": "value" + }, + { + "number": 42, + "boolean": true + }, + null, + { + "special_chars": "test with spaces and symbols: !@#$%^\u0026*()" + } + ], + [ + { + "status": "active" + }, + { + "status": "inactive" + }, + { + "status": "pending" + }, + { + "unicode": "测试 🚀 émojis" + } + ], + [ + { + "user": { + "id": 123, + "name": "John Doe", + "email": "john@example.com" + }, + "metadata": { + "tags": [ + "user", + "active" + ], + "created": "2023-01-01" + } + }, + { + "product": { + "id": "abc-123", + "price": 99.99, + "available": true + }, + "inventory": { + "count": 5, + "warehouse": "US-WEST" + } + }, + null, + { + "error": { + "code": 500, + "details": { + "timestamp": "2023-12-24T14:30:00Z", + "stack_trace": "Error at line 42" + }, + "message": "Internal server error" + } + } + ], + [ + [ + 1, + 2, + 3, + "test" + ], + [], + [ + { + "id": 1, + "name": "item1" + }, + { + "id": 2, + "name": "item2" + } + ], + [ + null, + true, + false, + 0, + "", + {} + ] + ], + [ + { + "level1": { + "level2": { + "level3": { + "data": "deep" + } + } + } + }, + { + "config": { + "database": { + "ssl": false, + "host": "localhost", + "port": 5432 + } + } + }, + null, + { + "a": { + "b": { + "c": { + "d": { + "e": "very deep nesting" + } + } + } + } + } + ], + [ + {}, + [], + { + "empty": {} + }, + null + ], + [ + null, + null, + null, + null + ], + [ + { + "code": "network_error", + "message": "The restore request was not successful The request failed because the MSISDN does not exist in the PPS." 
+ }, + { + "code": "network_error", + "message": "The imei for the device 007abee2-2105-459b-b4a9-d218c8180a84 could not be found" + }, + [], + { + "code": "network_error", + "message": "The imei for the device cdfd9055-3b61-46f3-945a-dfd0f6aa4f27 could not be found" + } + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/grafana-postgresql-datasource/testdata/table/types_jsonb.sql b/pkg/tsdb/grafana-postgresql-datasource/testdata/table/types_jsonb.sql new file mode 100644 index 00000000000..d91a712d152 --- /dev/null +++ b/pkg/tsdb/grafana-postgresql-datasource/testdata/table/types_jsonb.sql @@ -0,0 +1,19 @@ +-- SELECT * FROM tbl +-- test jsonb data types including complex structures and edge cases +CREATE TEMPORARY TABLE tbl ( + id integer, + simple_json jsonb, + simple_json_nn jsonb NOT NULL, + complex_json jsonb, + array_json jsonb, + nested_json jsonb, + empty_json jsonb, + null_json jsonb, + network_response jsonb +); + +INSERT INTO tbl (id, simple_json, simple_json_nn, complex_json, array_json, nested_json, empty_json, null_json, network_response) VALUES +(1, '{"key": "value"}', '{"status": "active"}', '{"user": {"id": 123, "name": "John Doe", "email": "john@example.com"}, "metadata": {"created": "2023-01-01", "tags": ["user", "active"]}}', '[1, 2, 3, "test"]', '{"level1": {"level2": {"level3": {"data": "deep"}}}}', '{}', 'null', '{"code": "network_error", "message": "The restore request was not successful The request failed because the MSISDN does not exist in the PPS."}'), +(2, '{"number": 42, "boolean": true}', '{"status": "inactive"}', '{"product": {"id": "abc-123", "price": 99.99, "available": true}, "inventory": {"count": 5, "warehouse": "US-WEST"}}', '[]', '{"config": {"database": {"host": "localhost", "port": 5432, "ssl": false}}}', '[]', NULL, '{"code": "network_error", "message": "The imei for the device 007abee2-2105-459b-b4a9-d218c8180a84 could not be found"}'), +(3, NULL, '{"status": "pending"}', NULL, '[{"id": 1, "name": 
"item1"}, {"id": 2, "name": "item2"}]', NULL, '{"empty": {}}', 'null', '[]'), +(4, '{"special_chars": "test with spaces and symbols: !@#$%^&*()"}', '{"unicode": "测试 🚀 émojis"}', '{"error": {"code": 500, "message": "Internal server error", "details": {"stack_trace": "Error at line 42", "timestamp": "2023-12-24T14:30:00Z"}}}', '[null, true, false, 0, "", {}]', '{"a": {"b": {"c": {"d": {"e": "very deep nesting"}}}}}', 'null', 'null', '{"code": "network_error", "message": "The imei for the device cdfd9055-3b61-46f3-945a-dfd0f6aa4f27 could not be found"}'); diff --git a/pkg/tsdb/grafana-testdata-datasource/kinds/query.go b/pkg/tsdb/grafana-testdata-datasource/kinds/query.go index 1e325205c5b..c302e2a7546 100644 --- a/pkg/tsdb/grafana-testdata-datasource/kinds/query.go +++ b/pkg/tsdb/grafana-testdata-datasource/kinds/query.go @@ -72,6 +72,7 @@ const ( TestDataQueryTypeNodeGraph TestDataQueryType = "node_graph" TestDataQueryTypePredictableCsvWave TestDataQueryType = "predictable_csv_wave" TestDataQueryTypePredictablePulse TestDataQueryType = "predictable_pulse" + TestDataQueryTypeQueryMeta TestDataQueryType = "query_meta" TestDataQueryTypeRandomWalk TestDataQueryType = "random_walk" TestDataQueryTypeRandomWalkTable TestDataQueryType = "random_walk_table" TestDataQueryTypeRandomWalkWithError TestDataQueryType = "random_walk_with_error" diff --git a/pkg/tsdb/grafana-testdata-datasource/kinds/query.panel.schema.json b/pkg/tsdb/grafana-testdata-datasource/kinds/query.panel.schema.json index 419e2afcc29..e490eecc61f 100644 --- a/pkg/tsdb/grafana-testdata-datasource/kinds/query.panel.schema.json +++ b/pkg/tsdb/grafana-testdata-datasource/kinds/query.panel.schema.json @@ -229,7 +229,7 @@ "additionalProperties": false }, "scenarioId": { - "description": "Possible enum values:\n - `\"annotations\"` \n - `\"arrow\"` \n - `\"csv_content\"` \n - `\"csv_file\"` \n - `\"csv_metric_values\"` \n - `\"datapoints_outside_range\"` \n - `\"error_with_source\"` \n - 
`\"exponential_heatmap_bucket_data\"` \n - `\"flame_graph\"` \n - `\"grafana_api\"` \n - `\"linear_heatmap_bucket_data\"` \n - `\"live\"` \n - `\"logs\"` \n - `\"manual_entry\"` \n - `\"no_data_points\"` \n - `\"node_graph\"` \n - `\"predictable_csv_wave\"` \n - `\"predictable_pulse\"` \n - `\"random_walk\"` \n - `\"random_walk_table\"` \n - `\"random_walk_with_error\"` \n - `\"raw_frame\"` \n - `\"server_error_500\"` \n - `\"steps\"` \n - `\"simulation\"` \n - `\"slow_query\"` \n - `\"streaming_client\"` \n - `\"table_static\"` \n - `\"trace\"` \n - `\"usa\"` \n - `\"variables-query\"` ", + "description": "Possible enum values:\n - `\"annotations\"` \n - `\"arrow\"` \n - `\"csv_content\"` \n - `\"csv_file\"` \n - `\"csv_metric_values\"` \n - `\"datapoints_outside_range\"` \n - `\"error_with_source\"` \n - `\"exponential_heatmap_bucket_data\"` \n - `\"flame_graph\"` \n - `\"grafana_api\"` \n - `\"linear_heatmap_bucket_data\"` \n - `\"live\"` \n - `\"logs\"` \n - `\"manual_entry\"` \n - `\"no_data_points\"` \n - `\"node_graph\"` \n - `\"predictable_csv_wave\"` \n - `\"predictable_pulse\"` \n - `\"query_meta\"` \n - `\"random_walk\"` \n - `\"random_walk_table\"` \n - `\"random_walk_with_error\"` \n - `\"raw_frame\"` \n - `\"server_error_500\"` \n - `\"steps\"` \n - `\"simulation\"` \n - `\"slow_query\"` \n - `\"streaming_client\"` \n - `\"table_static\"` \n - `\"trace\"` \n - `\"usa\"` \n - `\"variables-query\"` ", "type": "string", "enum": [ "annotations", @@ -250,6 +250,7 @@ "node_graph", "predictable_csv_wave", "predictable_pulse", + "query_meta", "random_walk", "random_walk_table", "random_walk_with_error", diff --git a/pkg/tsdb/grafana-testdata-datasource/kinds/query.request.schema.json b/pkg/tsdb/grafana-testdata-datasource/kinds/query.request.schema.json index 824bea0896b..68afcaf9c23 100644 --- a/pkg/tsdb/grafana-testdata-datasource/kinds/query.request.schema.json +++ b/pkg/tsdb/grafana-testdata-datasource/kinds/query.request.schema.json @@ -239,7 +239,7 @@ 
"additionalProperties": false }, "scenarioId": { - "description": "Possible enum values:\n - `\"annotations\"` \n - `\"arrow\"` \n - `\"csv_content\"` \n - `\"csv_file\"` \n - `\"csv_metric_values\"` \n - `\"datapoints_outside_range\"` \n - `\"error_with_source\"` \n - `\"exponential_heatmap_bucket_data\"` \n - `\"flame_graph\"` \n - `\"grafana_api\"` \n - `\"linear_heatmap_bucket_data\"` \n - `\"live\"` \n - `\"logs\"` \n - `\"manual_entry\"` \n - `\"no_data_points\"` \n - `\"node_graph\"` \n - `\"predictable_csv_wave\"` \n - `\"predictable_pulse\"` \n - `\"random_walk\"` \n - `\"random_walk_table\"` \n - `\"random_walk_with_error\"` \n - `\"raw_frame\"` \n - `\"server_error_500\"` \n - `\"steps\"` \n - `\"simulation\"` \n - `\"slow_query\"` \n - `\"streaming_client\"` \n - `\"table_static\"` \n - `\"trace\"` \n - `\"usa\"` \n - `\"variables-query\"` ", + "description": "Possible enum values:\n - `\"annotations\"` \n - `\"arrow\"` \n - `\"csv_content\"` \n - `\"csv_file\"` \n - `\"csv_metric_values\"` \n - `\"datapoints_outside_range\"` \n - `\"error_with_source\"` \n - `\"exponential_heatmap_bucket_data\"` \n - `\"flame_graph\"` \n - `\"grafana_api\"` \n - `\"linear_heatmap_bucket_data\"` \n - `\"live\"` \n - `\"logs\"` \n - `\"manual_entry\"` \n - `\"no_data_points\"` \n - `\"node_graph\"` \n - `\"predictable_csv_wave\"` \n - `\"predictable_pulse\"` \n - `\"query_meta\"` \n - `\"random_walk\"` \n - `\"random_walk_table\"` \n - `\"random_walk_with_error\"` \n - `\"raw_frame\"` \n - `\"server_error_500\"` \n - `\"steps\"` \n - `\"simulation\"` \n - `\"slow_query\"` \n - `\"streaming_client\"` \n - `\"table_static\"` \n - `\"trace\"` \n - `\"usa\"` \n - `\"variables-query\"` ", "type": "string", "enum": [ "annotations", @@ -260,6 +260,7 @@ "node_graph", "predictable_csv_wave", "predictable_pulse", + "query_meta", "random_walk", "random_walk_table", "random_walk_with_error", diff --git a/pkg/tsdb/grafana-testdata-datasource/kinds/query.types.json 
b/pkg/tsdb/grafana-testdata-datasource/kinds/query.types.json index af7cb24a888..5b0f3e97aaa 100644 --- a/pkg/tsdb/grafana-testdata-datasource/kinds/query.types.json +++ b/pkg/tsdb/grafana-testdata-datasource/kinds/query.types.json @@ -151,7 +151,7 @@ "type": "string" }, "scenarioId": { - "description": "Possible enum values:\n - `\"annotations\"` \n - `\"arrow\"` \n - `\"csv_content\"` \n - `\"csv_file\"` \n - `\"csv_metric_values\"` \n - `\"datapoints_outside_range\"` \n - `\"error_with_source\"` \n - `\"exponential_heatmap_bucket_data\"` \n - `\"flame_graph\"` \n - `\"grafana_api\"` \n - `\"linear_heatmap_bucket_data\"` \n - `\"live\"` \n - `\"logs\"` \n - `\"manual_entry\"` \n - `\"no_data_points\"` \n - `\"node_graph\"` \n - `\"predictable_csv_wave\"` \n - `\"predictable_pulse\"` \n - `\"random_walk\"` \n - `\"random_walk_table\"` \n - `\"random_walk_with_error\"` \n - `\"raw_frame\"` \n - `\"server_error_500\"` \n - `\"steps\"` \n - `\"simulation\"` \n - `\"slow_query\"` \n - `\"streaming_client\"` \n - `\"table_static\"` \n - `\"trace\"` \n - `\"usa\"` \n - `\"variables-query\"` ", + "description": "Possible enum values:\n - `\"annotations\"` \n - `\"arrow\"` \n - `\"csv_content\"` \n - `\"csv_file\"` \n - `\"csv_metric_values\"` \n - `\"datapoints_outside_range\"` \n - `\"error_with_source\"` \n - `\"exponential_heatmap_bucket_data\"` \n - `\"flame_graph\"` \n - `\"grafana_api\"` \n - `\"linear_heatmap_bucket_data\"` \n - `\"live\"` \n - `\"logs\"` \n - `\"manual_entry\"` \n - `\"no_data_points\"` \n - `\"node_graph\"` \n - `\"predictable_csv_wave\"` \n - `\"predictable_pulse\"` \n - `\"query_meta\"` \n - `\"random_walk\"` \n - `\"random_walk_table\"` \n - `\"random_walk_with_error\"` \n - `\"raw_frame\"` \n - `\"server_error_500\"` \n - `\"steps\"` \n - `\"simulation\"` \n - `\"slow_query\"` \n - `\"streaming_client\"` \n - `\"table_static\"` \n - `\"trace\"` \n - `\"usa\"` \n - `\"variables-query\"` ", "enum": [ "annotations", "arrow", @@ -171,6 +171,7 @@ 
"node_graph", "predictable_csv_wave", "predictable_pulse", + "query_meta", "random_walk", "random_walk_table", "random_walk_with_error", diff --git a/pkg/tsdb/grafana-testdata-datasource/scenarios.go b/pkg/tsdb/grafana-testdata-datasource/scenarios.go index 43b8b1347b3..45b6710318e 100644 --- a/pkg/tsdb/grafana-testdata-datasource/scenarios.go +++ b/pkg/tsdb/grafana-testdata-datasource/scenarios.go @@ -158,6 +158,12 @@ Timestamps will line up evenly on timeStepSeconds (For example, 60 seconds means handler: s.handleRandomWalkWithErrorScenario, }) + s.registerScenario(&Scenario{ + ID: kinds.TestDataQueryTypeQueryMeta, + Name: "Query Metadata", + handler: s.handleQueryMetaScenario, + }) + s.registerScenario(&Scenario{ // Is no longer strictly a _server_ error scenario, but ID is kept for legacy :) ID: kinds.TestDataQueryTypeServerError500, @@ -390,6 +396,31 @@ func (s *Service) handleCSVMetricValuesScenario(ctx context.Context, req *backen return resp, nil } +func (s *Service) handleQueryMetaScenario(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { + resp := backend.NewQueryDataResponse() + + if len(req.Queries) == 0 { + return nil, errors.New("no queries") + } + + refId := req.Queries[0].RefID + + username := req.PluginContext.User.Name + + keys := []string{"username"} + values := []string{username} + + frame := data.NewFrame("", + data.NewField("keys", nil, keys), + data.NewField("values", nil, values), + ) + r := backend.DataResponse{} + r.Frames = data.Frames{frame} + + resp.Responses[refId] = r + return resp, nil +} + func (s *Service) handleRandomWalkWithErrorScenario(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { resp := backend.NewQueryDataResponse() diff --git a/pkg/tsdb/graphite/graphite.go b/pkg/tsdb/graphite/graphite.go index 512598da1b7..2a7ffb13a91 100644 --- a/pkg/tsdb/graphite/graphite.go +++ b/pkg/tsdb/graphite/graphite.go @@ -100,7 +100,7 @@ func (s *Service) 
CallResource(ctx context.Context, req *backend.CallResourceReq func (s *Service) createRequest(ctx context.Context, dsInfo *datasourceInfo, params URLParams) (*http.Request, error) { u, err := url.Parse(dsInfo.URL) if err != nil { - return nil, err + return nil, backend.DownstreamError(err) } if params.SubPath != "" { @@ -125,7 +125,7 @@ func (s *Service) createRequest(ctx context.Context, dsInfo *datasourceInfo, par req, err := http.NewRequestWithContext(ctx, method, u.String(), params.Body) if err != nil { s.logger.Info("Failed to create request", "error", err) - return nil, fmt.Errorf("failed to create request: %w", err) + return nil, backend.PluginError(fmt.Errorf("failed to create request: %w", err)) } for k, v := range params.Headers { diff --git a/pkg/tsdb/graphite/healthcheck_test.go b/pkg/tsdb/graphite/healthcheck_test.go index 8c19d809ea0..98df2453b59 100644 --- a/pkg/tsdb/graphite/healthcheck_test.go +++ b/pkg/tsdb/graphite/healthcheck_test.go @@ -42,7 +42,7 @@ func (rt *healthCheckFailRoundTripper) RoundTrip(req *http.Request) (*http.Respo Status: "400", StatusCode: 400, Header: nil, - Body: nil, + Body: io.NopCloser(strings.NewReader("this is a failed healthcheck")), ContentLength: 0, Request: req, }, nil @@ -107,7 +107,7 @@ func Test_CheckHealth(t *testing.T) { assert.NoError(t, err) assert.Equal(t, backend.HealthStatusError, res.Status) assert.Equal(t, "Graphite health check failed. 
See details below", res.Message) - assert.Equal(t, []byte("{\"verboseMessage\": \"request failed, status: 400\" }"), res.JSONDetails) + assert.Equal(t, []byte("{\"verboseMessage\": \"request failed with error: this is a failed healthcheck\" }"), res.JSONDetails) }) } diff --git a/pkg/tsdb/graphite/query.go b/pkg/tsdb/graphite/query.go index 460ac2cfba5..6f4d2bca083 100644 --- a/pkg/tsdb/graphite/query.go +++ b/pkg/tsdb/graphite/query.go @@ -16,8 +16,10 @@ import ( "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/tracing" "github.com/grafana/grafana-plugin-sdk-go/data" + "github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "golang.org/x/net/html" ) func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, dsInfo *datasourceInfo) (*backend.QueryDataResponse, error) { @@ -26,10 +28,13 @@ func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, d req *http.Request formData url.Values }{} + result := backend.NewQueryDataResponse() + for _, query := range req.Queries { graphiteReq, formData, emptyQuery, err := s.createGraphiteRequest(ctx, query, dsInfo) if err != nil { - return nil, err + result.Responses[query.RefID] = backend.ErrorResponseWithErrorSource(err) + return result, nil } if emptyQuery != nil { @@ -46,7 +51,6 @@ func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, d } } - var result = backend.QueryDataResponse{} if len(emptyQueries) != 0 { s.logger.Warn("Found query models without targets", "models without targets", strings.Join(emptyQueries, "\n")) // If no queries had a valid target, return an error; otherwise, attempt with the targets we have @@ -56,9 +60,9 @@ func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, d } // marking this downstream error as it is a user error, but arguably this is a plugin error // since the plugin 
should have frontend validation that prevents us from getting into this state - missingQueryResponse := backend.ErrDataResponseWithSource(400, backend.ErrorSourceDownstream, "no query target found for the alert rule") + missingQueryResponse := backend.ErrDataResponseWithSource(400, backend.ErrorSourceDownstream, "no query target found") result.Responses["A"] = missingQueryResponse - return &result, nil + return result, nil } } @@ -83,7 +87,8 @@ func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, d if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) - return &result, err + result.Responses[refId] = backend.ErrorResponseWithErrorSource(backend.DownstreamError(err)) + return result, nil } defer func() { @@ -97,16 +102,13 @@ func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, d if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) - return &result, err + result.Responses[refId] = backend.ErrorResponseWithErrorSource(err) + return result, nil } frames = append(frames, queryFrames...) } - result = backend.QueryDataResponse{ - Responses: make(backend.Responses), - } - for _, f := range frames { if resp, ok := result.Responses[f.Name]; ok { resp.Frames = append(resp.Frames, f) @@ -118,7 +120,7 @@ func (s *Service) RunQuery(ctx context.Context, req *backend.QueryDataRequest, d } } - return &result, nil + return result, nil } // processQuery converts a Graphite data source query to a Graphite query target. 
It returns the target, @@ -127,7 +129,7 @@ func (s *Service) processQuery(query backend.DataQuery) (string, *GraphiteQuery, queryJSON := GraphiteQuery{} err := json.Unmarshal(query.JSON, &queryJSON) if err != nil { - return "", &queryJSON, false, fmt.Errorf("failed to decode the Graphite query: %w", err) + return "", &queryJSON, false, backend.PluginError(fmt.Errorf("failed to decode the Graphite query: %w", err)) } s.logger.Debug("Graphite", "query", queryJSON) currTarget := queryJSON.TargetFull @@ -237,7 +239,7 @@ func (s *Service) toDataFrames(response *http.Response, refId string) (frames da func (s *Service) parseResponse(res *http.Response) ([]TargetResponseDTO, error) { body, err := io.ReadAll(res.Body) if err != nil { - return nil, err + return nil, backend.DownstreamError(err) } defer func() { if err := res.Body.Close(); err != nil { @@ -246,20 +248,50 @@ func (s *Service) parseResponse(res *http.Response) ([]TargetResponseDTO, error) }() if res.StatusCode/100 != 2 { - s.logger.Info("Request failed", "status", res.Status, "body", string(body)) - return nil, fmt.Errorf("request failed, status: %s", res.Status) + graphiteError := parseGraphiteError(res.StatusCode, string(body)) + s.logger.Info("Request failed", "status", res.Status, "error", graphiteError, "body", string(body)) + return nil, errorsource.SourceError(backend.ErrorSourceFromHTTPStatus(res.StatusCode), fmt.Errorf("request failed with error: %s", graphiteError), false) } var data []TargetResponseDTO err = json.Unmarshal(body, &data) if err != nil { s.logger.Info("Failed to unmarshal graphite response", "error", err, "status", res.Status, "body", string(body)) - return nil, err + return nil, backend.DownstreamError(err) } return data, nil } +/** + * Duplicated from the frontend. + * Graphite-web before v1.6 returns HTTP 500 with full stack traces in an HTML page + * when a query fails. It results in massive error alerts with HTML tags in the UI. 
+ * This function removes all HTML tags and keeps only the last line from the stack + * trace which should be the most meaningful. + */ +func parseGraphiteError(status int, body string) (errorMsg string) { + errorMsg = body + if status == http.StatusInternalServerError { + if strings.HasPrefix(body, " +

Internal Server Error

+

The server encountered an unexpected condition that prevented it from fulfilling the request.

+
Error: Invalid metric path 'stats.invalid.metric'
+Error: Target not found +`, + queries: []backend.DataQuery{ + { + RefID: "A", + TimeRange: backend.TimeRange{ + From: time.Unix(1609459200, 0), + To: time.Unix(1609459260, 0), + }, + MaxDataPoints: 1000, + JSON: []byte(`{ + "target": "stats.invalid.metric" + }`), + }, + }, + expectError: true, + errorContains: "Error: Target not found", // Should parse HTML and extract the last meaningful line + }, + { + name: "malformed JSON response", + serverStatus: 200, + serverResponse: `[{invalid json}]`, + queries: []backend.DataQuery{ + { + RefID: "A", + TimeRange: backend.TimeRange{ + From: time.Unix(1609459200, 0), + To: time.Unix(1609459260, 0), + }, + MaxDataPoints: 1000, + JSON: []byte(`{ + "target": "stats.counters.web.hits" + }`), + }, + }, + expectError: true, + }, + { + name: "invalid query JSON", + serverStatus: 200, + serverResponse: `[]`, + queries: []backend.DataQuery{ + { + RefID: "A", + TimeRange: backend.TimeRange{ + From: time.Unix(1609459200, 0), + To: time.Unix(1609459260, 0), + }, + MaxDataPoints: 1000, + JSON: []byte(`{invalid json}`), + }, + }, + expectError: true, + errorContains: "failed to decode the Graphite query", + }, + { + name: "interval format transformation", + serverStatus: 200, + serverResponse: `[ + { + "target": "hitcount(stats.counters.web.hits, '1min')", + "datapoints": [[100, 1609459200], [150, 1609459260]] + } + ]`, + queries: []backend.DataQuery{ + { + RefID: "A", + TimeRange: backend.TimeRange{ + From: time.Unix(1609459200, 0), + To: time.Unix(1609459260, 0), + }, + MaxDataPoints: 1000, + JSON: []byte(`{ + "target": "hitcount(stats.counters.web.hits, '1m')" + }`), + }, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testName := strings.ReplaceAll(tt.name, " ", "_") + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + require.NoError(t, err) + + // Choose response based on target for multiple queries + 
response := tt.serverResponse + if tt.multipleTargets != nil { + target := r.FormValue("target") + if targetResponse, ok := tt.multipleTargets[target]; ok { + response = targetResponse + } + } + + if !strings.Contains(tt.name, "empty target") { + assert.NotEmpty(t, r.FormValue("target")) + } + + w.WriteHeader(tt.serverStatus) + _, err = w.Write([]byte(response)) + require.NoError(t, err) + })) + defer server.Close() + + dsInfo := &datasourceInfo{ + Id: 1, + URL: server.URL, + HTTPClient: &http.Client{}, + } + + service := &Service{ + logger: backend.Logger, + } + + req := &backend.QueryDataRequest{ + PluginContext: backend.PluginContext{ + DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{ + ID: 1, + URL: server.URL, + }, + OrgID: 1, + }, + Queries: tt.queries, + } + + result, err := service.RunQuery(context.Background(), req, dsInfo) + + if tt.expectError { + if err != nil { + if tt.errorContains != "" { + assert.Contains(t, err.Error(), tt.errorContains) + } + } else { + require.NotNil(t, result) + found := false + for _, resp := range result.Responses { + if resp.Error != nil { + found = true + if tt.errorContains != "" { + assert.Contains(t, resp.Error.Error(), tt.errorContains) + } + break + } + } + assert.True(t, found, "Expected error but none found") + } + } else { + assert.NoError(t, err) + require.NotNil(t, result) + + for refID, resp := range result.Responses { + experimental.CheckGoldenJSONResponse(t, "testdata", fmt.Sprintf("%s-RefID-%s.golden", testName, refID), &resp, false) + } + } + }) + } +} + +func TestParseGraphiteError(t *testing.T) { + tests := []struct { + name string + status int + body string + expected string + }{ + { + name: "simple text error", + status: 400, + body: "Bad request: invalid target", + expected: "Bad request: invalid target", + }, + { + name: "JSON error", + status: 400, + body: `{"error": "Invalid target format"}`, + expected: `{"error": "Invalid target format"}`, + }, + { + name: "HTML error", + status: 500, 
+ body: `

Internal Server Error

Target not found

`, + expected: "Internal Server Error\nTarget not found", + }, + { + name: "complex HTML error", + status: 500, + body: ` +

Internal Server Error

+

The server encountered an unexpected condition that prevented it from fulfilling the request.

+
Error: Invalid metric path 'stats.invalid.metric'
+Final error message here +`, + expected: "Internal Server Error\nThe server encountered an unexpected condition that prevented it from fulfilling the request.\nError: Invalid metric path 'stats.invalid.metric'\nFinal error message here", + }, + { + name: "HTML error with unicode", + status: 500, + body: `

Error: Invalid path 'test' and "value"

`, + expected: "Error: Invalid path 'test' and \"value\"", + }, + { + name: "HTML with whitespace and newlines", + status: 500, + body: ` + +

Error

+ +

Something went wrong

+ + Critical failure occurred + + `, + expected: "Error\nSomething went wrong\nCritical failure occurred", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseGraphiteError(tt.status, tt.body) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/tsdb/graphite/resource_handler.go b/pkg/tsdb/graphite/resource_handler.go index 2651c320d65..4a3fe29282c 100644 --- a/pkg/tsdb/graphite/resource_handler.go +++ b/pkg/tsdb/graphite/resource_handler.go @@ -294,7 +294,13 @@ func (s *Service) handleFunctions(ctx context.Context, dsInfo *datasourceInfo, _ _, rawBody, statusCode, err := doGraphiteRequest[map[string]any](ctx, dsInfo, s.logger, req, true) if err != nil { - return nil, statusCode, fmt.Errorf("version request failed: %v", err) + return nil, statusCode, fmt.Errorf("functions request failed: %v", err) + } + + // It's possible that a HTML response may be returned + // This isn't valid so we'll return an error and use the default functions + if strings.HasPrefix(string(*rawBody), "<") { + return []byte{}, http.StatusNotAcceptable, fmt.Errorf("invalid functions response received from Graphite") } if rawBody == nil { diff --git a/pkg/tsdb/graphite/resource_handler_test.go b/pkg/tsdb/graphite/resource_handler_test.go index 66777f95072..c8ee9da1293 100644 --- a/pkg/tsdb/graphite/resource_handler_test.go +++ b/pkg/tsdb/graphite/resource_handler_test.go @@ -735,21 +735,41 @@ func TestHandleFunctions(t *testing.T) { responseBody: `{"error": "internal error"}`, statusCode: 500, expectError: true, - errorContains: "version request failed", + errorContains: "functions request failed", }, { name: "functions request not found", responseBody: `{"error": "not found"}`, statusCode: 404, expectError: true, - errorContains: "version request failed", + errorContains: "functions request failed", }, { name: "network error", responseBody: "", statusCode: 0, expectError: true, - errorContains: "version request failed", + errorContains: 
"functions request failed", + }, + { + name: "html response", + responseBody: ` + + Graphite Browser + + + + + + + + + + + `, + statusCode: 200, + expectError: true, + errorContains: "invalid functions response received from Graphite", }, } diff --git a/pkg/tsdb/graphite/testdata/interval_format_transformation-RefID-A.golden.jsonc b/pkg/tsdb/graphite/testdata/interval_format_transformation-RefID-A.golden.jsonc new file mode 100644 index 00000000000..39bda73cd9d --- /dev/null +++ b/pkg/tsdb/graphite/testdata/interval_format_transformation-RefID-A.golden.jsonc @@ -0,0 +1,72 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: A +// Dimensions: 2 Fields by 2 Rows +// +-------------------------------+------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 100 | +// | 2021-01-01 00:01:00 +0000 UTC | 150 | +// +-------------------------------+------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "A", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": {}, + "config": { + "displayNameFromDS": "hitcount(stats.counters.web.hits, '1min')" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000 + ], + [ + 100, + 150 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/graphite/testdata/mixed_queries_-_some_empty,_some_valid-RefID-B.golden.jsonc b/pkg/tsdb/graphite/testdata/mixed_queries_-_some_empty,_some_valid-RefID-B.golden.jsonc new file mode 100644 index 00000000000..455803dddc2 --- /dev/null +++ b/pkg/tsdb/graphite/testdata/mixed_queries_-_some_empty,_some_valid-RefID-B.golden.jsonc @@ -0,0 +1,72 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: B +// Dimensions: 2 Fields by 2 Rows +// +-------------------------------+------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 100 | +// | 2021-01-01 00:01:00 +0000 UTC | 150 | +// +-------------------------------+------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "B", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": {}, + "config": { + "displayNameFromDS": "stats.counters.web.hits" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000 + ], + [ + 100, + 150 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/graphite/testdata/successful_multiple_queries-RefID-A.golden.jsonc b/pkg/tsdb/graphite/testdata/successful_multiple_queries-RefID-A.golden.jsonc new file mode 100644 index 00000000000..b8ef382179e --- /dev/null +++ b/pkg/tsdb/graphite/testdata/successful_multiple_queries-RefID-A.golden.jsonc @@ -0,0 +1,72 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: A +// Dimensions: 2 Fields by 2 Rows +// +-------------------------------+------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 100 | +// | 2021-01-01 00:01:00 +0000 UTC | 150 | +// +-------------------------------+------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "A", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": {}, + "config": { + "displayNameFromDS": "stats.counters.web.hits" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000 + ], + [ + 100, + 150 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/graphite/testdata/successful_multiple_queries-RefID-B.golden.jsonc b/pkg/tsdb/graphite/testdata/successful_multiple_queries-RefID-B.golden.jsonc new file mode 100644 index 00000000000..3d208e886ee --- /dev/null +++ b/pkg/tsdb/graphite/testdata/successful_multiple_queries-RefID-B.golden.jsonc @@ -0,0 +1,72 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: B +// Dimensions: 2 Fields by 2 Rows +// +-------------------------------+------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 50 | +// | 2021-01-01 00:01:00 +0000 UTC | 75 | +// +-------------------------------+------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "B", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": {}, + "config": { + "displayNameFromDS": "stats.counters.api.calls" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000 + ], + [ + 50, + 75 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/graphite/testdata/successful_single_query_with_data-RefID-A.golden.jsonc b/pkg/tsdb/graphite/testdata/successful_single_query_with_data-RefID-A.golden.jsonc new file mode 100644 index 00000000000..5c200551f5b --- /dev/null +++ b/pkg/tsdb/graphite/testdata/successful_single_query_with_data-RefID-A.golden.jsonc @@ -0,0 +1,75 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: A +// Dimensions: 2 Fields by 3 Rows +// +-------------------------------+------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 100 | +// | 2021-01-01 00:01:00 +0000 UTC | 150 | +// | 2021-01-01 00:02:00 +0000 UTC | 120 | +// +-------------------------------+------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "A", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": {}, + "config": { + "displayNameFromDS": "stats.counters.web.hits" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000, + 1609459320000 + ], + [ + 100, + 150, + 120 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/graphite/testdata/successful_single_query_with_null_values-RefID-A.golden.jsonc b/pkg/tsdb/graphite/testdata/successful_single_query_with_null_values-RefID-A.golden.jsonc new file mode 100644 index 00000000000..fc00359a0ca --- /dev/null +++ b/pkg/tsdb/graphite/testdata/successful_single_query_with_null_values-RefID-A.golden.jsonc @@ -0,0 +1,75 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: A +// Dimensions: 2 Fields by 3 Rows +// +-------------------------------+------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 100 | +// | 2021-01-01 00:01:00 +0000 UTC | null | +// | 2021-01-01 00:02:00 +0000 UTC | 120 | +// +-------------------------------+------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "A", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": {}, + "config": { + "displayNameFromDS": "stats.counters.web.hits" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000, + 1609459320000 + ], + [ + 100, + null, + 120 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/graphite/testdata/successful_single_query_with_tags-RefID-A.golden.jsonc b/pkg/tsdb/graphite/testdata/successful_single_query_with_tags-RefID-A.golden.jsonc new file mode 100644 index 00000000000..31fba0dcea5 --- /dev/null +++ b/pkg/tsdb/graphite/testdata/successful_single_query_with_tags-RefID-A.golden.jsonc @@ -0,0 +1,77 @@ +// 🌟 This was machine generated. Do not edit. 🌟 +// +// Frame[0] { +// "type": "timeseries-multi", +// "typeVersion": [ +// 0, +// 0 +// ] +// } +// Name: A +// Dimensions: 2 Fields by 2 Rows +// +-------------------------------+--------------------------------------------------------------------+ +// | Name: time | Name: value | +// | Labels: | Labels: environment=production, host=server1, port=8080, rate=99.5 | +// | Type: []time.Time | Type: []*float64 | +// +-------------------------------+--------------------------------------------------------------------+ +// | 2021-01-01 00:00:00 +0000 UTC | 100 | +// | 2021-01-01 00:01:00 +0000 UTC | 150 | +// +-------------------------------+--------------------------------------------------------------------+ +// +// +// 🌟 This was machine generated. Do not edit. 
🌟 +{ + "status": 200, + "frames": [ + { + "schema": { + "name": "A", + "meta": { + "type": "timeseries-multi", + "typeVersion": [ + 0, + 0 + ] + }, + "fields": [ + { + "name": "time", + "type": "time", + "typeInfo": { + "frame": "time.Time" + } + }, + { + "name": "value", + "type": "number", + "typeInfo": { + "frame": "float64", + "nullable": true + }, + "labels": { + "environment": "production", + "host": "server1", + "port": "8080", + "rate": "99.5" + }, + "config": { + "displayNameFromDS": "stats.counters.web.hits" + } + } + ] + }, + "data": { + "values": [ + [ + 1609459200000, + 1609459260000 + ], + [ + 100, + 150 + ] + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/tsdb/tempo/standalone/datasource.go b/pkg/tsdb/tempo/standalone/datasource.go index f449fb98fc0..d41d68f2424 100644 --- a/pkg/tsdb/tempo/standalone/datasource.go +++ b/pkg/tsdb/tempo/standalone/datasource.go @@ -13,6 +13,7 @@ import ( ) var ( + _ backend.CheckHealthHandler = (*Datasource)(nil) _ backend.QueryDataHandler = (*Datasource)(nil) _ backend.StreamHandler = (*Datasource)(nil) _ backend.CallResourceHandler = (*Datasource)(nil) @@ -28,6 +29,10 @@ func NewDatasource(c context.Context, b backend.DataSourceInstanceSettings) (ins }, nil } +func (d *Datasource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + return d.Service.CheckHealth(ctx, req) +} + func (d *Datasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { return d.Service.QueryData(ctx, req) } diff --git a/pkg/tsdb/tempo/tempo.go b/pkg/tsdb/tempo/tempo.go index 0c9d904e7b1..ccb6173489b 100644 --- a/pkg/tsdb/tempo/tempo.go +++ b/pkg/tsdb/tempo/tempo.go @@ -2,6 +2,8 @@ package tempo import ( "context" + "encoding/json" + "errors" "fmt" "io" "net/http" @@ -156,6 +158,112 @@ func (s *Service) CallResource(ctx context.Context, req *backend.CallResourceReq return s.resourceHandler.CallResource(ctx, req, sender) } 
+func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { + var streamingEnabled bool + var jsonData map[string]interface{} + + pluginCtx := backend.PluginConfigFromContext(ctx) + dsInfo, err := s.getDSInfo(ctx, pluginCtx) + if err != nil { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: err.Error(), + }, nil + } + + if pluginCtx.DataSourceInstanceSettings != nil && pluginCtx.DataSourceInstanceSettings.JSONData != nil { + if err := json.Unmarshal(pluginCtx.DataSourceInstanceSettings.JSONData, &jsonData); err == nil { + if streaming, ok := jsonData["streamingEnabled"].(map[string]interface{}); ok { + if searchEnabled, ok := streaming["search"].(bool); ok && searchEnabled { + streamingEnabled = true + } + } + } + } + + if streamingEnabled { + if dsInfo.StreamingClient == nil { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: "Streaming client is not available", + }, nil + } + + currentTime := time.Now() + queryStartTime := currentTime.Add(-15 * time.Minute) + searchRequest := &tempopb.SearchRequest{ + Query: "{}", + Start: uint32(queryStartTime.Unix()), + End: uint32(currentTime.Unix()), + Limit: 1, + } + + streamingConnection, err := dsInfo.StreamingClient.Search(ctx, searchRequest) + if err != nil { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: err.Error(), + }, nil + } + + _, err = streamingConnection.Recv() + if err != nil && !errors.Is(err, io.EOF) { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: err.Error(), + }, nil + } + + return &backend.CheckHealthResult{ + Status: backend.HealthStatusOk, + Message: "Data source is working. 
Streaming test succeeded.", + }, nil + } + + parsedURL, err := url.Parse(dsInfo.URL) + if err != nil { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: err.Error(), + }, nil + } + + parsedURL.Path = path.Join(parsedURL.Path, "api/echo") + httpReq, err := http.NewRequestWithContext(ctx, "GET", parsedURL.String(), nil) + if err != nil { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: err.Error(), + }, nil + } + + resp, err := dsInfo.HTTPClient.Do(httpReq) + if err != nil { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: err.Error(), + }, nil + } + + defer func() { + if err := resp.Body.Close(); err != nil { + s.logger.Warn("Failed to close response body", "error", err) + } + }() + + if resp.StatusCode != 200 { + return &backend.CheckHealthResult{ + Status: backend.HealthStatusError, + Message: fmt.Sprintf("Tempo echo endpoint returned status %d", resp.StatusCode), + }, nil + } + + return &backend.CheckHealthResult{ + Status: backend.HealthStatusOk, + Message: "Data source is working", + }, nil +} + // handleTags handles requests to /tags resource func (s *Service) handleTags(rw http.ResponseWriter, req *http.Request) { s.proxyToTempo(rw, req, "api/v2/search/tags") diff --git a/pkg/tsdb/tempo/tempo_test.go b/pkg/tsdb/tempo/tempo_test.go new file mode 100644 index 00000000000..02e6c56050e --- /dev/null +++ b/pkg/tsdb/tempo/tempo_test.go @@ -0,0 +1,67 @@ +package tempo + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/grafana/grafana-plugin-sdk-go/backend" + "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" + "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" + "github.com/stretchr/testify/assert" +) + +func TestCheckHealth(t *testing.T) { + tests := []struct { + name string + httpStatusCode int + expectedStatus backend.HealthStatus + expectedMessage string + }{ + { + name: "successful health check", 
+ httpStatusCode: 200, + expectedStatus: backend.HealthStatusOk, + expectedMessage: "Data source is working", + }, + { + name: "http error", + httpStatusCode: 500, + expectedStatus: backend.HealthStatusError, + expectedMessage: "Tempo echo endpoint returned status 500", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(tt.httpStatusCode) + })) + defer server.Close() + + pluginCtx := backend.PluginContext{ + DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{ + URL: server.URL, + }, + } + + im := datasource.NewInstanceManager(func(ctx context.Context, settings backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { + dsInfo := &DatasourceInfo{ + URL: server.URL, + HTTPClient: server.Client(), + StreamingClient: nil, + } + return dsInfo, nil + }) + + service := &Service{im: im} + ctx := backend.WithPluginContext(context.Background(), pluginCtx) + result, err := service.CheckHealth(ctx, &backend.CheckHealthRequest{}) + + assert.NoError(t, err) + assert.Equal(t, tt.expectedStatus, result.Status) + assert.Contains(t, result.Message, tt.expectedMessage) + }) + } +} diff --git a/pkg/util/debouncer/queue.go b/pkg/util/debouncer/queue.go new file mode 100644 index 00000000000..a6dd404da3a --- /dev/null +++ b/pkg/util/debouncer/queue.go @@ -0,0 +1,119 @@ +package debouncer + +import ( + "context" + "errors" + "slices" + "sync" +) + +type CombineFn[T any] func(a, b T) (c T, ok bool) + +// Queue is a queue of elements. Elements added to the queue can be combined together by the provided combiner function. +// Once the queue is closed, no more elements can be added, but Next() will still return remaining elements. 
+type Queue[T any] struct { + combineFn CombineFn[T] + + mu sync.Mutex + elements []T + closed bool + waitChan chan struct{} // if not nil, will be closed when new element is added +} + +func NewQueue[T any](combineFn CombineFn[T]) *Queue[T] { + return &Queue[T]{ + combineFn: combineFn, + } +} + +func (q *Queue[T]) Len() int { + q.mu.Lock() + defer q.mu.Unlock() + return len(q.elements) +} + +// Elements returns copy of the queue. +func (q *Queue[T]) Elements() []T { + q.mu.Lock() + defer q.mu.Unlock() + return slices.Clone(q.elements) +} + +func (q *Queue[T]) Add(n T) { + q.mu.Lock() + defer q.mu.Unlock() + if q.closed { + panic("queue already closed") + } + + for i, e := range q.elements { + if c, ok := q.combineFn(e, n); ok { + // No need to signal, since we are not adding new element. + q.elements[i] = c + return + } + } + + q.elements = append(q.elements, n) + q.notifyWaiters() +} + +// Must be called with lock held. +func (q *Queue[T]) notifyWaiters() { + if q.waitChan != nil { + // Wakes up all waiting goroutines (but also possibly zero, if they stopped waiting already). + close(q.waitChan) + q.waitChan = nil + } +} + +var ErrClosed = errors.New("queue closed") + +// Next returns the next element in the queue. If no element is available, Next will block until +// an element is added to the queue, or provided context is done. +// If the queue is closed, ErrClosed is returned. +func (q *Queue[T]) Next(ctx context.Context) (T, error) { + var zero T + + q.mu.Lock() + unlockInDefer := true + defer func() { + if unlockInDefer { + q.mu.Unlock() + } + }() + + for len(q.elements) == 0 { + if q.closed { + return zero, ErrClosed + } + + // Wait for an element. Make sure there's a wait channel that we can use. 
+ wch := q.waitChan + if wch == nil { + wch = make(chan struct{}) + q.waitChan = wch + } + // Unlock before waiting + q.mu.Unlock() + + select { + case <-ctx.Done(): + unlockInDefer = false + return zero, ctx.Err() + case <-wch: + q.mu.Lock() + } + } + + first := q.elements[0] + q.elements = q.elements[1:] + return first, nil +} + +func (q *Queue[T]) Close() { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.notifyWaiters() +} diff --git a/pkg/util/debouncer/queue_test.go b/pkg/util/debouncer/queue_test.go new file mode 100644 index 00000000000..76106f8f99a --- /dev/null +++ b/pkg/util/debouncer/queue_test.go @@ -0,0 +1,155 @@ +package debouncer + +import ( + "context" + "errors" + "math/rand" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "go.uber.org/goleak" +) + +// This verifies that all goroutines spawned from tests are finished at the end of tests. +// Applies to all tests in the package. +func TestMain(m *testing.M) { + goleak.VerifyTestMain(m) +} + +func TestQueueBasic(t *testing.T) { + q := NewQueue(func(a, b int) (c int, ok bool) { + return a + b, true + }) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + // Empty queue will time out. + require.Equal(t, context.DeadlineExceeded, nextErr(t, q, ctx)) + + q.Add(10) + require.Equal(t, 10, next(t, q)) + require.Equal(t, 0, q.Len()) + + q.Add(20) + require.Equal(t, 20, next(t, q)) + require.Equal(t, 0, q.Len()) + + q.Add(10) + require.Equal(t, 1, q.Len()) + q.Add(20) + require.Equal(t, 1, q.Len()) + require.Equal(t, 30, next(t, q)) + require.Equal(t, 0, q.Len()) + + q.Add(100) + require.Equal(t, 1, q.Len()) + q.Close() + require.Equal(t, 1, q.Len()) + require.Equal(t, 100, next(t, q)) + require.Equal(t, ErrClosed, nextErr(t, q, context.Background())) + require.Equal(t, 0, q.Len()) + + // We can call Next repeatedly, but will always get error. 
+ require.Equal(t, ErrClosed, nextErr(t, q, context.Background())) +} + +func TestQueueConcurrency(t *testing.T) { + q := NewQueue(func(a, b int64) (c int64, ok bool) { + // Combine the same numbers together. + if a == b { + return a + b, true + } + return 0, false + }) + + const numbers = 10000 + const writeConcurrency = 50 + const readConcurrency = 25 + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + totalWrittenSum := atomic.NewInt64(0) + totalReadSum := atomic.NewInt64(0) + addCalls := atomic.NewInt64(0) + nextCalls := atomic.NewInt64(0) + + // We will add some numbers to the queue. + writesWG := sync.WaitGroup{} + for i := 0; i < writeConcurrency; i++ { + writesWG.Add(1) + go func() { + defer writesWG.Done() + for j := 0; j < numbers; j++ { + v := r.Int63n(100) // Generate small number, so that we have a chance for combining some numbers. + q.Add(v) + addCalls.Inc() + totalWrittenSum.Add(v) + } + }() + } + + readsWG := sync.WaitGroup{} + for i := 0; i < readConcurrency; i++ { + readsWG.Add(1) + go func() { + defer readsWG.Done() + + for { + v, err := q.Next(context.Background()) + if errors.Is(err, ErrClosed) { + return + } + require.NoError(t, err) + + nextCalls.Inc() + totalReadSum.Add(v) + } + }() + } + + writesWG.Wait() + // Close queue after sending all numbers. This signals readers that they can stop. + q.Close() + + // Wait until all readers finish too. + readsWG.Wait() + + // Verify that all numbers were sent, combined and received. 
+ require.Equal(t, int64(writeConcurrency*numbers), addCalls.Load()) + require.Equal(t, totalWrittenSum.Load(), totalReadSum.Load()) + require.LessOrEqual(t, nextCalls.Load(), addCalls.Load()) + t.Log("add calls:", addCalls.Load(), "next calls:", nextCalls.Load(), "total written sum:", totalWrittenSum.Load(), "total read sum:", totalReadSum.Load()) +} + +func TestQueueCloseUnblocksReaders(t *testing.T) { + q := NewQueue(func(a, b int) (c int, ok bool) { + return a + b, true + }) + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(50 * time.Millisecond) + q.Close() + }() + + _, err := q.Next(context.Background()) + require.ErrorIs(t, err, ErrClosed) + + wg.Wait() +} + +func next[T any](t *testing.T, q *Queue[T]) T { + v, err := q.Next(context.Background()) + require.NoError(t, err) + return v +} +func nextErr[T any](t *testing.T, q *Queue[T], ctx context.Context) error { + _, err := q.Next(ctx) + require.Error(t, err) + return err +} diff --git a/pkg/util/sqlite/sqlite_cgo.go b/pkg/util/sqlite/sqlite_cgo.go index de18511741d..a52a4d5e2d7 100644 --- a/pkg/util/sqlite/sqlite_cgo.go +++ b/pkg/util/sqlite/sqlite_cgo.go @@ -35,12 +35,13 @@ func IsUniqueConstraintViolation(err error) bool { } func ErrorMessage(err error) string { - if err == nil { - return "" - } var sqliteErr sqlite3.Error if errors.As(err, &sqliteErr) { return sqliteErr.Error() } return err.Error() } + +func DriverType() string { + return "mattn/go-sqlite3 (CGO enabled)" +} diff --git a/pkg/util/sqlite/sqlite_nocgo.go b/pkg/util/sqlite/sqlite_nocgo.go index ec77078ea20..f283f858d32 100644 --- a/pkg/util/sqlite/sqlite_nocgo.go +++ b/pkg/util/sqlite/sqlite_nocgo.go @@ -67,7 +67,7 @@ func convertSQLite3URL(dsn string) (string, error) { newDSN := dsn[:pos] q := url.Values{} - q.Add("_pragma", "busy_timeout(5000)") + q.Add("_pragma", "busy_timeout(7500)") // Default of mattn/go-sqlite3 is 5s but we increase it to 7.5s to try and avoid busy errors. 
for key, values := range params { if alias, ok := dsnAlias[strings.ToLower(key)]; ok { @@ -112,6 +112,10 @@ func init() { sql.Register("sqlite3", &moderncDriver{Driver: &Driver{}}) } +func DriverType() string { + return "modernc.org/sqlite (CGO disabled)" +} + func IsBusyOrLocked(err error) bool { var sqliteErr *sqlite.Error if errors.As(err, &sqliteErr) { diff --git a/public/app/api/clients/folder/v1beta1/index.ts b/public/app/api/clients/folder/v1beta1/index.ts index cefd69c4955..bb0e11f218f 100644 --- a/public/app/api/clients/folder/v1beta1/index.ts +++ b/public/app/api/clients/folder/v1beta1/index.ts @@ -24,24 +24,6 @@ export const folderAPIv1beta1 = generatedAPI // We don't want delete to invalidate getFolder tags, as that would lead to unnecessary 404s invalidatesTags: (result, error) => (error ? [] : [{ type: 'Folder', id: 'LIST' }]), }, - updateFolder: { - query: (queryArg) => ({ - url: `/folders/${queryArg.name}`, - method: 'PATCH', - // We need to stringify the body and set the correct header for the call to work with k8s api. 
- body: JSON.stringify(queryArg.patch), - headers: { - 'Content-Type': 'application/strategic-merge-patch+json', - }, - params: { - pretty: queryArg.pretty, - dryRun: queryArg.dryRun, - fieldManager: queryArg.fieldManager, - fieldValidation: queryArg.fieldValidation, - force: queryArg.force, - }, - }), - }, }, }) .injectEndpoints({ @@ -98,4 +80,4 @@ export const { } = folderAPIv1beta1; // eslint-disable-next-line no-barrel-files/no-barrel-files -export { type Folder, type FolderList, type CreateFolderApiArg, type ReplaceFolderApiArg } from './endpoints.gen'; +export { type CreateFolderApiArg, type Folder, type FolderList, type ReplaceFolderApiArg } from './endpoints.gen'; diff --git a/public/app/api/clients/playlist/v0alpha1/endpoints.gen.ts b/public/app/api/clients/playlist/v0alpha1/endpoints.gen.ts index 347f6d2b86a..7a42a89de07 100644 --- a/public/app/api/clients/playlist/v0alpha1/endpoints.gen.ts +++ b/public/app/api/clients/playlist/v0alpha1/endpoints.gen.ts @@ -308,11 +308,11 @@ export type PlaylistStatus = { }; export type Playlist = { /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; + apiVersion: string; /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata?: ObjectMeta; - spec?: PlaylistSpec; + kind: string; + metadata: ObjectMeta; + spec: PlaylistSpec; status?: PlaylistStatus; }; export type ListMeta = { diff --git a/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts b/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts index ac0a34e9af5..2ea0fd00730 100644 --- a/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts +++ b/public/app/api/clients/preferences/v1alpha1/endpoints.gen.ts @@ -14,12 +14,12 @@ const injectedRtkApi = api query: (queryArg) => ({ url: `/preferences`, params: { + pretty: queryArg.pretty, allowWatchBookmarks: queryArg.allowWatchBookmarks, continue: queryArg['continue'], fieldSelector: queryArg.fieldSelector, labelSelector: queryArg.labelSelector, limit: queryArg.limit, - pretty: queryArg.pretty, resourceVersion: queryArg.resourceVersion, resourceVersionMatch: queryArg.resourceVersionMatch, sendInitialEvents: queryArg.sendInitialEvents, @@ -29,6 +29,20 @@ const injectedRtkApi = api }), providesTags: ['Preferences'], }), + createPreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences`, + method: 'POST', + body: queryArg.preferences, + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + fieldManager: queryArg.fieldManager, + fieldValidation: queryArg.fieldValidation, + }, + }), + invalidatesTags: ['Preferences'], + }), mergedPreferences: build.query({ query: () => ({ url: `/preferences/merged` }), providesTags: ['Preferences'], @@ -42,6 +56,50 @@ const injectedRtkApi = api }), providesTags: ['Preferences'], }), + replacePreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences/${queryArg.name}`, + method: 'PUT', + body: queryArg.preferences, + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + fieldManager: queryArg.fieldManager, + fieldValidation: 
queryArg.fieldValidation, + }, + }), + invalidatesTags: ['Preferences'], + }), + deletePreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences/${queryArg.name}`, + method: 'DELETE', + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + gracePeriodSeconds: queryArg.gracePeriodSeconds, + ignoreStoreReadErrorWithClusterBreakingPotential: queryArg.ignoreStoreReadErrorWithClusterBreakingPotential, + orphanDependents: queryArg.orphanDependents, + propagationPolicy: queryArg.propagationPolicy, + }, + }), + invalidatesTags: ['Preferences'], + }), + updatePreferences: build.mutation({ + query: (queryArg) => ({ + url: `/preferences/${queryArg.name}`, + method: 'PATCH', + body: queryArg.patch, + params: { + pretty: queryArg.pretty, + dryRun: queryArg.dryRun, + fieldManager: queryArg.fieldManager, + fieldValidation: queryArg.fieldValidation, + force: queryArg.force, + }, + }), + invalidatesTags: ['Preferences'], + }), listStars: build.query({ query: (queryArg) => ({ url: `/stars`, @@ -173,6 +231,8 @@ export type GetApiResourcesApiResponse = /** status 200 OK */ ApiResourceList; export type GetApiResourcesApiArg = void; export type ListPreferencesApiResponse = /** status 200 OK */ PreferencesList; export type ListPreferencesApiArg = { + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; /** allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. */ allowWatchBookmarks?: boolean; /** The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key". @@ -187,8 +247,6 @@ export type ListPreferencesApiArg = { The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. */ limit?: number; - /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ - pretty?: string; /** resourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset */ @@ -216,6 +274,21 @@ export type ListPreferencesApiArg = { /** Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. */ watch?: boolean; }; +export type CreatePreferencesApiResponse = /** status 200 OK */ + | Preferences + | /** status 201 Created */ Preferences + | /** status 202 Accepted */ Preferences; +export type CreatePreferencesApiArg = { + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */ + fieldManager?: string; + /** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */ + fieldValidation?: string; + preferences: Preferences; +}; export type MergedPreferencesApiResponse = /** status 200 undefined */ any; export type MergedPreferencesApiArg = void; export type GetPreferencesApiResponse = /** status 200 OK */ Preferences; @@ -225,6 +298,53 @@ export type GetPreferencesApiArg = { /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ pretty?: string; }; +export type ReplacePreferencesApiResponse = /** status 200 OK */ Preferences | /** status 201 Created */ Preferences; +export type ReplacePreferencesApiArg = { + /** name of the Preferences */ + name: string; + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */ + fieldManager?: string; + /** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. 
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */ + fieldValidation?: string; + preferences: Preferences; +}; +export type DeletePreferencesApiResponse = /** status 200 OK */ Status | /** status 202 Accepted */ Status; +export type DeletePreferencesApiArg = { + /** name of the Preferences */ + name: string; + /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. 
*/ + gracePeriodSeconds?: number; + /** if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it */ + ignoreStoreReadErrorWithClusterBreakingPotential?: boolean; + /** Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. */ + orphanDependents?: boolean; + /** Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. */ + propagationPolicy?: string; +}; +export type UpdatePreferencesApiResponse = /** status 200 OK */ Preferences | /** status 201 Created */ Preferences; +export type UpdatePreferencesApiArg = { + /** name of the Preferences */ + name: string; + /** If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ + pretty?: string; + /** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */ + dryRun?: string; + /** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */ + fieldManager?: string; + /** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */ + fieldValidation?: string; + /** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. 
*/ + force?: boolean; + patch: Patch; +}; export type ListStarsApiResponse = /** status 200 OK */ StarsList; export type ListStarsApiArg = { /** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */ @@ -558,28 +678,6 @@ export type PreferencesSpec = { /** day of the week (sunday, monday, etc) */ weekStart?: string; }; -export type PreferencesstatusOperatorState = { - /** descriptiveState is an optional more descriptive state field which has no requirements on format */ - descriptiveState?: string; - /** details contains any extra information that is operator-specific */ - details?: { - [key: string]: object; - }; - /** lastEvaluation is the ResourceVersion last evaluated */ - lastEvaluation: string; - /** state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation. */ - state: string; -}; -export type PreferencesStatus = { - /** additionalFields is reserved for future use */ - additionalFields?: { - [key: string]: object; - }; - /** operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field. */ - operatorStates?: { - [key: string]: PreferencesstatusOperatorState; - }; -}; export type Preferences = { /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ apiVersion?: string; @@ -588,7 +686,6 @@ export type Preferences = { metadata: ObjectMeta; /** Spec is the spec of the Preferences */ spec: PreferencesSpec; - status: PreferencesStatus; }; export type ListMeta = { /** continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. */ @@ -608,55 +705,6 @@ export type PreferencesList = { kind?: string; metadata: ListMeta; }; -export type StarsResource = { - group: string; - kind: string; - /** The set of resources */ - names: string[]; -}; -export type StarsSpec = { - resource: StarsResource[]; -}; -export type StarsstatusOperatorState = { - /** descriptiveState is an optional more descriptive state field which has no requirements on format */ - descriptiveState?: string; - /** details contains any extra information that is operator-specific */ - details?: { - [key: string]: object; - }; - /** lastEvaluation is the ResourceVersion last evaluated */ - lastEvaluation: string; - /** state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation. */ - state: string; -}; -export type StarsStatus = { - /** additionalFields is reserved for future use */ - additionalFields?: { - [key: string]: object; - }; - /** operatorStates is a map of operator ID to operator state evaluations. 
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */ - operatorStates?: { - [key: string]: StarsstatusOperatorState; - }; -}; -export type Stars = { - /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; - /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata: ObjectMeta; - /** Spec is the spec of the Stars */ - spec: StarsSpec; - status: StarsStatus; -}; -export type StarsList = { - /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; - items: Stars[]; - /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata: ListMeta; -}; export type StatusCause = { /** The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional. 
@@ -702,11 +750,41 @@ export type Status = { status?: string; }; export type Patch = object; +export type StarsResource = { + group: string; + kind: string; + /** The set of resources */ + names: string[]; +}; +export type StarsSpec = { + resource: StarsResource[]; +}; +export type Stars = { + /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ + apiVersion?: string; + /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ + kind?: string; + metadata: ObjectMeta; + /** Spec is the spec of the Stars */ + spec: StarsSpec; +}; +export type StarsList = { + /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ + apiVersion?: string; + items: Stars[]; + /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ + kind?: string; + metadata: ListMeta; +}; export const { useGetApiResourcesQuery, useListPreferencesQuery, + useCreatePreferencesMutation, useMergedPreferencesQuery, useGetPreferencesQuery, + useReplacePreferencesMutation, + useDeletePreferencesMutation, + useUpdatePreferencesMutation, useListStarsQuery, useCreateStarsMutation, useDeletecollectionStarsMutation, diff --git a/public/app/api/clients/provisioning/v0alpha1/endpoints.gen.ts b/public/app/api/clients/provisioning/v0alpha1/endpoints.gen.ts index b43299a4d8a..41e4cac3c01 100644 --- a/public/app/api/clients/provisioning/v0alpha1/endpoints.gen.ts +++ b/public/app/api/clients/provisioning/v0alpha1/endpoints.gen.ts @@ -1010,9 +1010,9 @@ export type JobResourceSummary = { /** Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */ errors?: string[]; group?: string; + kind?: string; /** No action required (useful for sync) */ noop?: number; - resource?: string; total?: number; update?: number; write?: number; diff --git a/public/app/api/clients/rules/v0alpha1/endpoints.gen.ts b/public/app/api/clients/rules/v0alpha1/endpoints.gen.ts index 1555dae0e46..6537a5061fc 100644 --- a/public/app/api/clients/rules/v0alpha1/endpoints.gen.ts +++ b/public/app/api/clients/rules/v0alpha1/endpoints.gen.ts @@ -931,11 +931,11 @@ export type AlertRuleStatus = { }; export type AlertRule = { /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; + apiVersion: string; /** Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata?: ObjectMeta; - spec?: AlertRuleSpec; + kind: string; + metadata: ObjectMeta; + spec: AlertRuleSpec; status?: AlertRuleStatus; }; export type ListMeta = { @@ -1070,11 +1070,11 @@ export type RecordingRuleStatus = { }; export type RecordingRule = { /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; + apiVersion: string; /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata?: ObjectMeta; - spec?: RecordingRuleSpec; + kind: string; + metadata: ObjectMeta; + spec: RecordingRuleSpec; status?: RecordingRuleStatus; }; export type RecordingRuleList = { diff --git a/public/app/api/clients/shorturl/v1alpha1/endpoints.gen.ts b/public/app/api/clients/shorturl/v1alpha1/endpoints.gen.ts index cfb88c9224b..9cabcb8ab7b 100644 --- a/public/app/api/clients/shorturl/v1alpha1/endpoints.gen.ts +++ b/public/app/api/clients/shorturl/v1alpha1/endpoints.gen.ts @@ -524,11 +524,11 @@ export type ShortUrlStatus = { }; export type ShortUrl = { /** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */ - apiVersion?: string; + apiVersion: string; /** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */ - kind?: string; - metadata?: ObjectMeta; - spec?: ShortUrlSpec; + kind: string; + metadata: ObjectMeta; + spec: ShortUrlSpec; status?: ShortUrlStatus; }; export type ListMeta = { diff --git a/public/app/api/createBaseQuery.ts b/public/app/api/createBaseQuery.ts index ac282ddc192..51037493e88 100644 --- a/public/app/api/createBaseQuery.ts +++ b/public/app/api/createBaseQuery.ts @@ -15,12 +15,26 @@ interface CreateBaseQueryOptions { export function createBaseQuery({ baseURL }: CreateBaseQueryOptions): BaseQueryFn { async function backendSrvBaseQuery(requestOptions: RequestOptions) { try { + const headers: Record = { + ...requestOptions.headers, + }; + + // Add Content-Type header for PATCH requests to /apis/ endpoints if not already set + if ( + requestOptions.method?.toUpperCase() === 'PATCH' && + baseURL?.startsWith('/apis/') && + !headers['Content-Type'] + ) { + headers['Content-Type'] = 'application/strategic-merge-patch+json'; + } + const { data: responseData, ...meta } = await lastValueFrom( getBackendSrv().fetch({ ...requestOptions, url: baseURL + requestOptions.url, showErrorAlert: requestOptions.showErrorAlert ?? 
false, data: requestOptions.body, + headers, }) ); return { data: responseData, meta }; diff --git a/public/app/core/components/Branding/OrangeBadge.tsx b/public/app/core/components/Branding/OrangeBadge.tsx index be9d080df79..3088c63cec5 100644 --- a/public/app/core/components/Branding/OrangeBadge.tsx +++ b/public/app/core/components/Branding/OrangeBadge.tsx @@ -1,19 +1,25 @@ -import { css } from '@emotion/css'; +import { css, cx } from '@emotion/css'; +import { HTMLAttributes } from 'react'; import { GrafanaTheme2 } from '@grafana/data'; import { Icon, useStyles2 } from '@grafana/ui'; -export function OrangeBadge({ text }: { text: string }) { - const styles = useStyles2(getStyles); +interface Props extends HTMLAttributes { + text?: string; + className?: string; +} + +export function OrangeBadge({ text, className, ...htmlProps }: Props) { + const styles = useStyles2(getStyles, text); return ( -
+
{text}
); } -const getStyles = (theme: GrafanaTheme2) => { +const getStyles = (theme: GrafanaTheme2, text: string | undefined) => { return { wrapper: css({ display: 'inline-flex', @@ -26,6 +32,11 @@ const getStyles = (theme: GrafanaTheme2) => { fontSize: theme.typography.bodySmall.fontSize, lineHeight: theme.typography.bodySmall.lineHeight, alignItems: 'center', + ...(text === undefined && { + svg: { + marginRight: 0, + }, + }), }), }; }; diff --git a/public/app/core/components/NestedFolderPicker/NestedFolderList.tsx b/public/app/core/components/NestedFolderPicker/NestedFolderList.tsx index aa099cff928..7e53679f84e 100644 --- a/public/app/core/components/NestedFolderPicker/NestedFolderList.tsx +++ b/public/app/core/components/NestedFolderPicker/NestedFolderList.tsx @@ -32,6 +32,7 @@ interface NestedFolderListProps { onFolderSelect: (item: DashboardViewItem) => void; isItemLoaded: (itemIndex: number) => boolean; requestLoadMore: (folderUid: string | undefined) => void; + emptyFolders: Set; } export function NestedFolderList({ @@ -44,6 +45,7 @@ export function NestedFolderList({ onFolderSelect, isItemLoaded, requestLoadMore, + emptyFolders, }: NestedFolderListProps) { const infiniteLoaderRef = useRef(null); const styles = useStyles2(getStyles); @@ -57,8 +59,18 @@ export function NestedFolderList({ onFolderExpand, onFolderSelect, idPrefix, + emptyFolders, }), - [items, focusedItemIndex, foldersAreOpenable, selectedFolder, onFolderExpand, onFolderSelect, idPrefix] + [ + items, + focusedItemIndex, + foldersAreOpenable, + selectedFolder, + onFolderExpand, + onFolderSelect, + idPrefix, + emptyFolders, + ] ); const handleIsItemLoaded = useCallback( @@ -119,8 +131,16 @@ interface RowProps { const SKELETON_WIDTHS = [100, 200, 130, 160, 150]; function Row({ index, style: virtualStyles, data }: RowProps) { - const { items, focusedItemIndex, foldersAreOpenable, selectedFolder, onFolderExpand, onFolderSelect, idPrefix } = - data; + const { + items, + focusedItemIndex, + 
foldersAreOpenable, + selectedFolder, + onFolderExpand, + onFolderSelect, + idPrefix, + emptyFolders, + } = data; const { item, isOpen, level, parentUID } = items[index]; const rowRef = useRef(null); const labelId = useId(); @@ -173,6 +193,11 @@ function Row({ index, style: virtualStyles, data }: RowProps) { ) : null; } + // We don't have a direct value of whether things are coming from user searching but this seems to be a good + // approximation as when searching all items will be at top level, while things that are actually in the top level + // when just looking at a folders tree should not have parent. + const isSearchItem = level === 0 && item.parentUID !== undefined; + return ( // don't need a key handler here, it's handled at the input level in NestedFolderPicker // eslint-disable-next-line jsx-a11y/click-events-have-key-events @@ -198,12 +223,17 @@ function Row({ index, style: virtualStyles, data }: RowProps) {
- {foldersAreOpenable ? ( + {foldersAreOpenable && !emptyFolders.has(item.uid) ? ( { + ev.preventDefault(); + ev.stopPropagation(); + }} // tabIndex not needed here because we handle keyboard navigation at the input level tabIndex={-1} aria-label={isOpen ? `Collapse folder ${item.title}` : `Expand folder ${item.title}`} @@ -217,7 +247,7 @@ function Row({ index, style: virtualStyles, data }: RowProps) { {item.title} - + {isSearchItem && }
); @@ -247,7 +277,7 @@ const getStyles = (theme: GrafanaTheme2) => { }), folderButtonSpacer: css({ - paddingLeft: theme.spacing(0.5), + paddingLeft: theme.spacing(2.5), }), row: css({ diff --git a/public/app/core/components/NestedFolderPicker/NestedFolderPicker.tsx b/public/app/core/components/NestedFolderPicker/NestedFolderPicker.tsx index 826f8ec5002..67c97879c19 100644 --- a/public/app/core/components/NestedFolderPicker/NestedFolderPicker.tsx +++ b/public/app/core/components/NestedFolderPicker/NestedFolderPicker.tsx @@ -111,6 +111,7 @@ export function NestedFolderPicker({ const isBrowsing = Boolean(overlayOpen && !(search && searchResults)); const { + emptyFolders, items: browseFlatTree, isLoading: isBrowseLoading, requestNextPage: fetchFolderPage, @@ -328,7 +329,7 @@ export function NestedFolderPicker({ : null} + prefix={label ? : } placeholder={label ?? t('browse-dashboards.folder-picker.search-placeholder', 'Search folders')} value={search} invalid={invalid} @@ -341,7 +342,6 @@ export function NestedFolderPicker({ aria-owns={overlayId} aria-activedescendant={getDOMId(overlayId, flatTree[focusedItemIndex]?.item.uid)} role="combobox" - suffix={} {...getReferenceProps()} onKeyDown={handleKeyDown} /> @@ -381,6 +381,7 @@ export function NestedFolderPicker({ foldersAreOpenable={!(search && searchResults)} isItemLoaded={isItemLoaded} requestLoadMore={handleLoadMore} + emptyFolders={emptyFolders} />
)} diff --git a/public/app/core/components/NestedFolderPicker/useFoldersQueryAppPlatform.ts b/public/app/core/components/NestedFolderPicker/useFoldersQueryAppPlatform.ts index 60ca40f798e..34d068589c9 100644 --- a/public/app/core/components/NestedFolderPicker/useFoldersQueryAppPlatform.ts +++ b/public/app/core/components/NestedFolderPicker/useFoldersQueryAppPlatform.ts @@ -40,6 +40,9 @@ export function useFoldersQueryAppPlatform({ // Keep a list of all request subscriptions so we can unsubscribe from them when the component is unmounted const requestsRef = useRef([]); + // Set of UIDs for which children were requested but were empty. + const [emptyFolders, setEmptyFolders] = useState>(new Set()); + // Keep a list of selectors for dynamic state selection const [selectors, setSelectors] = useState>>( [] @@ -145,6 +148,14 @@ export function useFoldersQueryAppPlatform({ const childResponse = folderIsOpen && state.responseByParent[name]; if (childResponse) { + // If we finished loading and there are no children add to empty list + if ( + childResponse.data && + childResponse.status !== QueryStatus.pending && + childResponse.data.hits.length === 0 + ) { + setEmptyFolders((prev) => new Set(prev).add(name)); + } const childFlatItems = createFlatList(name, childResponse, level + 1); return [flatItem, ...childFlatItems]; } @@ -168,6 +179,7 @@ export function useFoldersQueryAppPlatform({ }, [state, isBrowsing, openFolders, rootFolderUID, rootFolderItem]); return { + emptyFolders, items: treeList, isLoading: state.isLoading, requestNextPage, diff --git a/public/app/core/components/NestedFolderPicker/useFoldersQueryLegacy.ts b/public/app/core/components/NestedFolderPicker/useFoldersQueryLegacy.ts index fa07abccf31..24eb4008b93 100644 --- a/public/app/core/components/NestedFolderPicker/useFoldersQueryLegacy.ts +++ b/public/app/core/components/NestedFolderPicker/useFoldersQueryLegacy.ts @@ -58,6 +58,9 @@ export function useFoldersQueryLegacy({ // Keep a list of all request 
subscriptions so we can unsubscribe from them when the component is unmounted const requestsRef = useRef([]); + // Set of UIDs for which children were requested but were empty. + const [emptyFolders, setEmptyFolders] = useState>(new Set()); + // Keep a list of selectors for dynamic state selection const [selectors, setSelectors] = useState< Array> @@ -165,8 +168,15 @@ export function useFoldersQueryLegacy({ }; const childPages = folderIsOpen && state.pagesByParent[item.uid]; + if (childPages) { const childFlatItems = createFlatList(item.uid, childPages, level + 1); + + // If we finished loading and there are no children add to empty list + if (childPages[0] && childPages[0].status !== PENDING_STATUS && childFlatItems.length === 0) { + setEmptyFolders((prev) => new Set(prev).add(item.uid)); + } + return [flatItem, ...childFlatItems]; } @@ -191,6 +201,7 @@ export function useFoldersQueryLegacy({ }, [state, isBrowsing, openFolders, rootFolderUID, rootFolderItem]); return { + emptyFolders, items: treeList, isLoading: state.isLoading, requestNextPage, diff --git a/public/app/core/components/Upgrade/ProBadge.tsx b/public/app/core/components/Upgrade/ProBadge.tsx index a7e9eec4b50..dc5aadf5d13 100644 --- a/public/app/core/components/Upgrade/ProBadge.tsx +++ b/public/app/core/components/Upgrade/ProBadge.tsx @@ -5,13 +5,14 @@ import { GrafanaTheme2 } from '@grafana/data'; import { reportExperimentView } from '@grafana/runtime'; import { useStyles2 } from '@grafana/ui'; +import { OrangeBadge } from '../Branding/OrangeBadge'; + export interface Props extends HTMLAttributes { - text?: string; experimentId?: string; eventVariant?: string; } -export const ProBadge = ({ text = 'PRO', className, experimentId, eventVariant = '', ...htmlProps }: Props) => { +export const ProBadge = ({ className, experimentId, eventVariant = '', ...htmlProps }: Props) => { const styles = useStyles2(getStyles); useEffect(() => { @@ -20,23 +21,13 @@ export const ProBadge = ({ text = 'PRO', className, 
experimentId, eventVariant = } }, [experimentId, eventVariant]); - return ( - - {text} - - ); + return ; }; const getStyles = (theme: GrafanaTheme2) => { return { badge: css({ marginLeft: theme.spacing(1.25), - borderRadius: theme.shape.borderRadius(5), - backgroundColor: theme.colors.success.main, - padding: theme.spacing(0.25, 0.75), - color: 'white', // use the same color for both themes - fontWeight: theme.typography.fontWeightMedium, - fontSize: theme.typography.pxToRem(10), }), }; }; diff --git a/public/app/core/utils/fetch.ts b/public/app/core/utils/fetch.ts index e714e944dc9..5be200a2eee 100644 --- a/public/app/core/utils/fetch.ts +++ b/public/app/core/utils/fetch.ts @@ -91,7 +91,9 @@ export const isContentTypeJson = (headers: Headers) => { const contentType = headers.get('content-type'); if ( contentType && - (contentType.toLowerCase() === 'application/json' || contentType.toLowerCase() === 'application/merge-patch+json') + ['application/json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'].includes( + contentType.toLowerCase() + ) ) { return true; } diff --git a/public/app/core/utils/navBarItem-translations.ts b/public/app/core/utils/navBarItem-translations.ts index 8d00c2049de..80f1d36aa34 100644 --- a/public/app/core/utils/navBarItem-translations.ts +++ b/public/app/core/utils/navBarItem-translations.ts @@ -154,7 +154,7 @@ export function getNavTitle(navId: string | undefined) { case 'plugin-page-grafana-k8s-app': return t('nav.kubernetes.title', 'Kubernetes'); case 'plugin-page-grafana-dbo11y-app': - return t('nav.databases.title', 'Databases'); + return t('nav.databases.title', 'Database'); case 'plugin-page-grafana-app-observability-app': return t('nav.application.title', 'Application'); case 'plugin-page-grafana-pyroscope-app': diff --git a/public/app/core/utils/shortLinks.ts b/public/app/core/utils/shortLinks.ts index b9187f7ca4c..eeaceaa298f 100644 --- a/public/app/core/utils/shortLinks.ts +++ 
b/public/app/core/utils/shortLinks.ts @@ -48,6 +48,9 @@ export const createShortLink = async function (path: string) { const result = await dispatch( generatedAPI.endpoints.createShortUrl.initiate({ shortUrl: { + apiVersion: 'shorturl.grafana.app/v1alpha1', + kind: 'Playlist', + metadata: {}, spec: { path: getRelativeURLPath(path), }, diff --git a/public/app/features/admin/AdminFeatureTogglesAPI.test.ts b/public/app/features/admin/AdminFeatureTogglesAPI.test.ts deleted file mode 100644 index ca6c25be525..00000000000 --- a/public/app/features/admin/AdminFeatureTogglesAPI.test.ts +++ /dev/null @@ -1,102 +0,0 @@ -import { BackendSrvRequest, config } from '@grafana/runtime'; - -import { getTogglesAPI } from './AdminFeatureTogglesAPI'; - -// implements @grafana/runtime/BackendSrv -class MockSrv { - constructor() { - this.apiCalls = []; - } - - apiCalls: Array<{ - url: string; - method: string; - }>; - - async get( - url: string, - params?: BackendSrvRequest['params'], - requestId?: BackendSrvRequest['requestId'], - options?: Partial - ) { - this.apiCalls.push({ - url: url, - method: 'get', - }); - if (config.featureToggles.kubernetesFeatureToggles && url.indexOf('current') > -1) { - return await { toggles: [] }; - } - - return await {}; - } - - async post(url: string, data?: unknown, options?: Partial) { - this.apiCalls.push({ - url: url, - method: 'post', - }); - return await {}; - } - - async patch(url: string, data: unknown, options?: Partial) { - this.apiCalls.push({ - url: url, - method: 'patch', - }); - return await {}; - } - - // these aren't needed for this test - async put(url: string, data: unknown, options?: Partial) { - return await {}; - } - async delete(url: string, data?: unknown, options?: Partial) { - return await {}; - } -} - -const testBackendSrv = new MockSrv(); - -jest.mock('@grafana/runtime', () => ({ - ...jest.requireActual('@grafana/runtime'), - getBackendSrv: () => testBackendSrv, - config: { - featureToggles: { - kubernetesFeatureToggles: 
false, - grafanaAPIServerWithExperimentalAPIs: false, - }, - }, -})); - -describe('AdminFeatureTogglesApi', () => { - beforeEach(() => { - jest.clearAllMocks(); - testBackendSrv.apiCalls.length = 0; - }); - - const originalToggles = { ...config.featureToggles }; - - afterAll(() => { - config.featureToggles = originalToggles; - }); - - it('uses the k8s api when the k8s toggles are on', async () => { - config.featureToggles.kubernetesFeatureToggles = true; - config.featureToggles.grafanaAPIServerWithExperimentalAPIs = true; - - const togglesApi = getTogglesAPI(); - await togglesApi.getFeatureToggles(); - await togglesApi.updateFeatureToggles([]); - const expected = [ - { - method: 'get', - url: '/apis/featuretoggle.grafana.app/v0alpha1/current', - }, - { - method: 'patch', - url: '/apis/featuretoggle.grafana.app/v0alpha1/current', - }, - ]; - expect(testBackendSrv.apiCalls).toEqual(expect.arrayContaining(expected)); - }); -}); diff --git a/public/app/features/admin/AdminFeatureTogglesAPI.ts b/public/app/features/admin/AdminFeatureTogglesAPI.ts deleted file mode 100644 index 6ecba0c8b7d..00000000000 --- a/public/app/features/admin/AdminFeatureTogglesAPI.ts +++ /dev/null @@ -1,77 +0,0 @@ -import { getBackendSrv } from '@grafana/runtime'; - -export type FeatureToggle = { - name: string; - description?: string; - enabled: boolean; - stage: string; - readOnly?: boolean; - hidden?: boolean; -}; - -export type CurrentTogglesState = { - restartRequired: boolean; - allowEditing: boolean; - toggles: FeatureToggle[]; -}; - -interface ResolvedToggleState { - kind: 'ResolvedToggleState'; - restartRequired?: boolean; - allowEditing?: boolean; - toggles?: K8sToggleSpec[]; // not used in patch - enabled: { [key: string]: boolean }; -} - -interface K8sToggleSpec { - name: string; - description: string; - enabled: boolean; - writeable: boolean; - source: K8sToggleSource; - stage: string; -} - -interface K8sToggleSource { - namespace: string; - name: string; -} - -interface 
FeatureTogglesAPI { - getFeatureToggles(): Promise; - updateFeatureToggles(toggles: FeatureToggle[]): Promise; -} - -class K8sAPI implements FeatureTogglesAPI { - baseURL = '/apis/featuretoggle.grafana.app/v0alpha1'; - - async getFeatureToggles(): Promise { - const current = await getBackendSrv().get(this.baseURL + '/current'); - return { - restartRequired: Boolean(current.restartRequired), - allowEditing: Boolean(current.allowEditing), - toggles: current.toggles!.map((t) => ({ - name: t.name, - description: t.description!, - enabled: t.enabled, - readOnly: !Boolean(t.writeable), - stage: t.stage, - hidden: false, // only return visible things - })), - }; - } - updateFeatureToggles(toggles: FeatureToggle[]): Promise { - const patchBody: ResolvedToggleState = { - kind: 'ResolvedToggleState', - enabled: {}, - }; - toggles.forEach((t) => { - patchBody.enabled[t.name] = t.enabled; - }); - return getBackendSrv().patch(this.baseURL + '/current', patchBody); - } -} - -export const getTogglesAPI = (): FeatureTogglesAPI => { - return new K8sAPI(); -}; diff --git a/public/app/features/admin/AdminFeatureTogglesPage.tsx b/public/app/features/admin/AdminFeatureTogglesPage.tsx deleted file mode 100644 index 113847983e1..00000000000 --- a/public/app/features/admin/AdminFeatureTogglesPage.tsx +++ /dev/null @@ -1,96 +0,0 @@ -import { css } from '@emotion/css'; -import { useState } from 'react'; -import { useAsync } from 'react-use'; - -import { GrafanaTheme2 } from '@grafana/data'; -import { Trans, t } from '@grafana/i18n'; -import { useStyles2, Icon, TextLink } from '@grafana/ui'; -import { Page } from 'app/core/components/Page/Page'; - -import { getTogglesAPI } from './AdminFeatureTogglesAPI'; -import { AdminFeatureTogglesTable } from './AdminFeatureTogglesTable'; - -export default function AdminFeatureTogglesPage() { - const [reload, setReload] = useState(1); - const togglesApi = getTogglesAPI(); - const featureState = useAsync(() => togglesApi.getFeatureToggles(), [reload]); - 
const styles = useStyles2(getStyles); - - const handleUpdateSuccess = () => { - setReload(reload + 1); - }; - - const EditingAlert = () => { - return ( -
-
- -
- - {featureState.value?.restartRequired - ? t( - 'admin.feature-toggles.restart-pending', - 'A restart is pending for your Grafana instance to apply the latest feature toggle changes' - ) - : t( - 'admin.feature-toggles.restart-required', - 'Saving feature toggle changes will prompt a restart of the instance, which may take a few minutes' - )} - -
- ); - }; - - const subTitle = ( -
- - View and edit feature toggles. Read more about feature toggles at{' '} - - grafana.com - - . - -
- ); - - return ( - - - <> - {featureState.error?.message} - {featureState.loading && 'Fetching feature toggles'} - - - {featureState.value && ( - - )} - - - - ); -} - -function getStyles(theme: GrafanaTheme2) { - return { - warning: css({ - display: 'flex', - marginTop: theme.spacing(0.25), - marginBottom: theme.spacing(0.25), - }), - icon: css({ - color: theme.colors.warning.main, - paddingRight: theme.spacing(), - }), - message: css({ - color: theme.colors.text.secondary, - marginTop: theme.spacing(0.25), - }), - }; -} diff --git a/public/app/features/admin/AdminFeatureTogglesTable.tsx b/public/app/features/admin/AdminFeatureTogglesTable.tsx deleted file mode 100644 index 65b6112652c..00000000000 --- a/public/app/features/admin/AdminFeatureTogglesTable.tsx +++ /dev/null @@ -1,201 +0,0 @@ -import { useState, useRef } from 'react'; - -import { Trans, t } from '@grafana/i18n'; -import { Switch, InteractiveTable, Tooltip, type CellProps, Button, ConfirmModal, type SortByFn } from '@grafana/ui'; - -import { FeatureToggle, getTogglesAPI } from './AdminFeatureTogglesAPI'; - -interface Props { - featureToggles: FeatureToggle[]; - allowEditing: boolean; - onUpdateSuccess: () => void; -} - -const sortByName: SortByFn = (a, b) => { - return a.original.name.localeCompare(b.original.name); -}; - -const sortByDescription: SortByFn = (a, b) => { - if (!a.original.description && !b.original.description) { - return 0; - } else if (!a.original.description) { - return 1; - } else if (!b.original.description) { - return -1; - } - return a.original.description.localeCompare(b.original.description); -}; - -const sortByEnabled: SortByFn = (a, b) => { - return a.original.enabled === b.original.enabled ? 0 : a.original.enabled ? 
1 : -1; -}; - -export function AdminFeatureTogglesTable({ featureToggles, allowEditing, onUpdateSuccess }: Props) { - // sort manually, doesn't look like it can be automatically done in the table - featureToggles.sort((a, b) => a.name.localeCompare(b.name)); - const serverToggles = useRef(featureToggles); - const [localToggles, setLocalToggles] = useState(featureToggles); - const [isSaving, setIsSaving] = useState(false); - const [showSaveModel, setShowSaveModal] = useState(false); - - const togglesApi = getTogglesAPI(); - - const handleToggleChange = (toggle: FeatureToggle, newValue: boolean) => { - const updatedToggle = { ...toggle, enabled: newValue }; - - // Update the local state - const updatedToggles = localToggles.map((t) => (t.name === toggle.name ? updatedToggle : t)); - setLocalToggles(updatedToggles); - }; - - const handleSaveChanges = async () => { - setIsSaving(true); - try { - const modifiedToggles = getModifiedToggles(); - await togglesApi.updateFeatureToggles(modifiedToggles); - // Pretend the values came from a new request - serverToggles.current = [...localToggles]; - onUpdateSuccess(); // should trigger a new get - } finally { - setIsSaving(false); - } - }; - - const saveButtonRef = useRef(null); - const showSaveChangesModal = (show: boolean) => () => { - setShowSaveModal(show); - if (!show && saveButtonRef.current) { - saveButtonRef.current.focus(); - } - }; - - const getModifiedToggles = (): FeatureToggle[] => { - return localToggles.filter((toggle, index) => toggle.enabled !== serverToggles.current[index].enabled); - }; - - const hasModifications = () => { - // Check if there are any differences between the original toggles and the local toggles - return localToggles.some((toggle, index) => toggle.enabled !== serverToggles.current[index].enabled); - }; - - const getToggleTooltipContent = (readOnlyToggle?: boolean) => { - if (!allowEditing) { - return 'Feature management is not configured for editing'; - } - if (readOnlyToggle) { - return 
'This is a non-editable feature'; - } - return ''; - }; - - const getStageCell = (stage: string) => { - switch (stage) { - case 'GA': - return ( - -
- GA -
-
- ); - case 'privatePreview': - case 'preview': - case 'experimental': - return t('admin.admin-feature-toggles-table.get-stage-cell.beta', 'Beta'); - case 'deprecated': - return t('admin.admin-feature-toggles-table.get-stage-cell.deprecated', 'Deprecated'); - default: - return stage; - } - }; - - const columns = [ - { - id: 'name', - header: 'Name', - cell: ({ cell: { value } }: CellProps) =>
{value}
, - sortType: sortByName, - }, - { - id: 'description', - header: 'Description', - cell: ({ cell: { value } }: CellProps) =>
{value}
, - sortType: sortByDescription, - }, - { - id: 'stage', - header: 'Stage', - cell: ({ cell: { value } }: CellProps) =>
{getStageCell(value)}
, - }, - { - id: 'enabled', - header: 'State', - cell: ({ row }: CellProps) => { - const renderStateSwitch = ( -
- handleToggleChange(row.original, e.currentTarget.checked)} - /> -
- ); - - return row.original.readOnly ? ( - {renderStateSwitch} - ) : ( - renderStateSwitch - ); - }, - sortType: sortByEnabled, - }, - ]; - - return ( - <> - {allowEditing && ( -
- - -

- - Some features are stable (GA) and enabled by default, whereas some are currently in their - preliminary Beta phase, available for early adoption. - -

-

- - We advise understanding the implications of each feature change before making modifications. - -

-
- } - confirmText={t('admin.admin-feature-toggles-table.confirmText-save-changes', 'Save changes')} - onConfirm={async () => { - showSaveChangesModal(false)(); - handleSaveChanges(); - }} - onDismiss={showSaveChangesModal(false)} - /> -
- )} - featureToggle.name} /> - - ); -} diff --git a/public/app/features/admin/UserLdapSyncInfo.tsx b/public/app/features/admin/UserLdapSyncInfo.tsx index ca1d0ec148a..1baf9e219a2 100644 --- a/public/app/features/admin/UserLdapSyncInfo.tsx +++ b/public/app/features/admin/UserLdapSyncInfo.tsx @@ -30,7 +30,7 @@ export class UserLdapSyncInfo extends PureComponent { const { ldapSyncInfo, user } = this.props; const nextSyncSuccessful = ldapSyncInfo && ldapSyncInfo.nextSync; const nextSyncTime = nextSyncSuccessful ? dateTimeFormat(ldapSyncInfo.nextSync, { format }) : ''; - const debugLDAPMappingURL = `${debugLDAPMappingBaseURL}?user=${user && user.login}`; + const debugLDAPMappingURL = `${debugLDAPMappingBaseURL}?username=${user && user.login}`; const canReadLDAPUser = contextSrv.hasPermission(AccessControlAction.LDAPUsersRead); const canSyncLDAPUser = contextSrv.hasPermission(AccessControlAction.LDAPUsersSync); diff --git a/public/app/features/alerting/unified/components/mute-timings/MuteTimingsTable.test.tsx b/public/app/features/alerting/unified/components/mute-timings/MuteTimingsTable.test.tsx index d9d611d7a00..0bce93870f9 100644 --- a/public/app/features/alerting/unified/components/mute-timings/MuteTimingsTable.test.tsx +++ b/public/app/features/alerting/unified/components/mute-timings/MuteTimingsTable.test.tsx @@ -1,5 +1,6 @@ import { render, screen, userEvent, within } from 'test/test-utils'; +import { base64UrlEncode } from '@grafana/alerting'; import { setupMswServer } from 'app/features/alerting/unified/mockApi'; import { setMuteTimingsListError, @@ -10,7 +11,7 @@ import { captureRequests } from 'app/features/alerting/unified/mocks/server/even import { AccessControlAction } from 'app/types/accessControl'; import { grantUserPermissions } from '../../mocks'; -import { TIME_INTERVAL_UID_HAPPY_PATH } from '../../mocks/server/handlers/k8s/timeIntervals.k8s'; +import { TIME_INTERVAL_NAME_HAPPY_PATH } from '../../mocks/server/handlers/k8s/timeIntervals.k8s'; import { 
AlertmanagerProvider } from '../../state/AlertmanagerContext'; import { GRAFANA_RULES_SOURCE_NAME } from '../../utils/datasource'; @@ -113,8 +114,9 @@ describe('MuteTimingsTable', () => { await user.click(await screen.findByRole('button', { name: /delete/i })); const requests = await capture; + const encodedName = base64UrlEncode(TIME_INTERVAL_NAME_HAPPY_PATH); const deleteRequest = requests.find( - (r) => r.url.includes(`timeintervals/${TIME_INTERVAL_UID_HAPPY_PATH}`) && r.method === 'DELETE' + (r) => r.url.includes(`timeintervals/${encodedName}`) && r.method === 'DELETE' ); expect(deleteRequest).toBeDefined(); diff --git a/public/app/features/alerting/unified/components/mute-timings/useMuteTimings.tsx b/public/app/features/alerting/unified/components/mute-timings/useMuteTimings.tsx index aad08e47548..94e290a2087 100644 --- a/public/app/features/alerting/unified/components/mute-timings/useMuteTimings.tsx +++ b/public/app/features/alerting/unified/components/mute-timings/useMuteTimings.tsx @@ -1,5 +1,6 @@ import { useEffect } from 'react'; +import { base64UrlEncode } from '@grafana/alerting'; import { alertmanagerApi } from 'app/features/alerting/unified/api/alertmanagerApi'; import { timeIntervalsApi } from 'app/features/alerting/unified/api/timeIntervalsApi'; import { mergeTimeIntervals } from 'app/features/alerting/unified/components/mute-timings/util'; @@ -10,9 +11,9 @@ import { import { BaseAlertmanagerArgs, Skippable } from 'app/features/alerting/unified/types/hooks'; import { PROVENANCE_NONE } from 'app/features/alerting/unified/utils/k8s/constants'; import { - encodeFieldSelector, isK8sEntityProvisioned, shouldUseK8sApi, + stringifyFieldSelector, } from 'app/features/alerting/unified/utils/k8s/utils'; import { MuteTimeInterval } from 'app/plugins/datasource/alertmanager/types'; @@ -203,8 +204,10 @@ export const useGetMuteTiming = ({ alertmanager, name: nameToFind }: BaseAlertma useEffect(() => { if (useK8sApi) { const namespace = getAPINamespace(); - const 
entityName = encodeFieldSelector(nameToFind); - getGrafanaTimeInterval({ namespace, fieldSelector: `spec.name=${entityName}` }, true); + getGrafanaTimeInterval( + { namespace, fieldSelector: stringifyFieldSelector([['metadata.name', base64UrlEncode(nameToFind)]]) }, + true + ); } else { getAlertmanagerTimeInterval(alertmanager, true); } diff --git a/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.test.tsx b/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.test.tsx new file mode 100644 index 00000000000..4fda03e9d53 --- /dev/null +++ b/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.test.tsx @@ -0,0 +1,248 @@ +import 'core-js/stable/structured-clone'; +import { FormProvider, useForm } from 'react-hook-form'; +import { clickSelectOption } from 'test/helpers/selectOptionInTest'; +import { render } from 'test/test-utils'; +import { byRole, byTestId } from 'testing-library-selector'; + +import { grafanaAlertNotifiers } from 'app/features/alerting/unified/mockGrafanaNotifiers'; +import { AlertmanagerProvider } from 'app/features/alerting/unified/state/AlertmanagerContext'; + +import { ChannelSubForm } from './ChannelSubForm'; +import { GrafanaCommonChannelSettings } from './GrafanaCommonChannelSettings'; +import { Notifier } from './notifiers'; + +type TestChannelValues = { + __id: string; + type: string; + settings: Record; + secureFields: Record; +}; + +type TestReceiverFormValues = { + name: string; + items: TestChannelValues[]; +}; + +const ui = { + typeSelector: byTestId('items.0.type'), + settings: { + webhook: { + url: byRole('textbox', { name: /^URL/ }), + optionalSettings: byRole('button', { name: /optional webhook settings/i }), + title: { + container: byTestId('items.0.settings.title'), + input: byRole('textbox', { name: /^Title/ }), + }, + message: { + container: byTestId('items.0.settings.message'), + input: byRole('textbox', { name: /^Message/ }), + }, + }, + slack: { + 
recipient: byTestId('items.0.settings.recipient'), + token: byTestId('items.0.settings.token'), + username: byTestId('items.0.settings.username'), + webhookUrl: byRole('textbox', { name: /^Webhook URL/ }), + }, + googlechat: { + optionalSettings: byRole('button', { name: /optional google hangouts chat settings/i }), + url: byRole('textbox', { name: /^URL/ }), + title: { + input: byRole('textbox', { name: /^Title/ }), + container: byTestId('items.0.settings.title'), + }, + message: { + input: byRole('textbox', { name: /^Message/ }), + container: byTestId('items.0.settings.message'), + }, + }, + }, +}; + +const notifiers: Notifier[] = [ + { dto: grafanaAlertNotifiers.webhook, meta: { enabled: true, order: 1 } }, + { dto: grafanaAlertNotifiers.slack, meta: { enabled: true, order: 2 } }, + { dto: grafanaAlertNotifiers.googlechat, meta: { enabled: true, order: 3 } }, + { dto: grafanaAlertNotifiers.sns, meta: { enabled: true, order: 4 } }, + { dto: grafanaAlertNotifiers.oncall, meta: { enabled: true, order: 5 } }, +]; + +describe('ChannelSubForm', () => { + function TestFormWrapper({ defaults, initial }: { defaults: TestChannelValues; initial?: TestChannelValues }) { + const form = useForm({ + defaultValues: { + name: 'test-contact-point', + items: [defaults], + }, + }); + + return ( + + + + + + ); + } + + function renderForm(defaults: TestChannelValues, initial?: TestChannelValues) { + return render(); + } + + it('switching type hides prior fields and shows new ones', async () => { + renderForm({ + __id: 'id-0', + type: 'webhook', + settings: { url: '' }, + secureFields: {}, + }); + + expect(ui.typeSelector.get()).toHaveTextContent('Webhook'); + + expect(ui.settings.webhook.url.get()).toBeInTheDocument(); + + expect(ui.settings.slack.recipient.query()).not.toBeInTheDocument(); + + await clickSelectOption(ui.typeSelector.get(), 'Slack'); + expect(ui.typeSelector.get()).toHaveTextContent('Slack'); + + expect(ui.settings.slack.recipient.get()).toBeInTheDocument(); + 
expect(ui.settings.slack.token.get()).toBeInTheDocument(); + expect(ui.settings.slack.username.get()).toBeInTheDocument(); + }); + + it('should clear secure fields when switching integration types', async () => { + const googlechatDefaults: TestChannelValues = { + __id: 'id-0', + type: 'googlechat', + settings: { title: 'Alert Title', message: 'Alert Message' }, + secureFields: { url: true }, + }; + + const { user } = renderForm(googlechatDefaults, googlechatDefaults); + + expect(ui.typeSelector.get()).toHaveTextContent('Google Hangouts Chat'); + + expect(ui.settings.googlechat.url.get()).toBeDisabled(); + expect(ui.settings.googlechat.url.get()).toHaveValue('configured'); + + await user.click(ui.settings.googlechat.optionalSettings.get()); + + expect(ui.settings.googlechat.title.input.get()).toHaveValue('Alert Title'); + expect(ui.settings.googlechat.message.input.get()).toHaveValue('Alert Message'); + + await clickSelectOption(ui.typeSelector.get(), 'Webhook'); + expect(ui.typeSelector.get()).toHaveTextContent('Webhook'); + + // Webhook URL field should now be present and empty (settings cleared) + expect(ui.settings.webhook.url.get()).toHaveValue(''); + expect(ui.settings.webhook.title.container.get()).toBeInTheDocument(); + expect(ui.settings.webhook.message.container.get()).toBeInTheDocument(); + + // If value for templated fields is empty the input should not be present + expect(ui.settings.webhook.message.input.query()).not.toBeInTheDocument(); + expect(ui.settings.webhook.title.input.query()).not.toBeInTheDocument(); + }); + + it('should clear settings when switching from webhook to googlechat', async () => { + const webhookDefaults: TestChannelValues = { + __id: 'id-0', + type: 'webhook', + settings: { url: 'https://example.com/webhook', title: 'Webhook Title', message: 'Webhook Message' }, + secureFields: {}, + }; + + const { user } = renderForm(webhookDefaults, webhookDefaults); + + expect(ui.typeSelector.get()).toHaveTextContent('Webhook'); + + 
expect(ui.settings.webhook.url.get()).toHaveValue('https://example.com/webhook'); + + await user.click(ui.settings.webhook.optionalSettings.get()); + expect(ui.settings.webhook.title.input.get()).toHaveValue('Webhook Title'); + expect(ui.settings.webhook.message.input.get()).toHaveValue('Webhook Message'); + + await clickSelectOption(ui.typeSelector.get(), 'Google Hangouts Chat'); + expect(ui.typeSelector.get()).toHaveTextContent('Google Hangouts Chat'); + + // Google Chat URL field should now be present and empty (settings cleared) + expect(ui.settings.googlechat.url.get()).toHaveValue(''); + expect(ui.settings.googlechat.title.container.get()).toBeInTheDocument(); + expect(ui.settings.googlechat.message.container.get()).toBeInTheDocument(); + + // If value for templated fields is empty the input should not be present + expect(ui.settings.googlechat.message.input.query()).not.toBeInTheDocument(); + expect(ui.settings.googlechat.title.input.query()).not.toBeInTheDocument(); + }); + + it('should restore initial values when switching back to original type', async () => { + const googlechatDefaults: TestChannelValues = { + __id: 'id-0', + type: 'googlechat', + settings: { title: 'Original Title', message: 'Original Message' }, + secureFields: { url: true }, + }; + + const { user } = renderForm(googlechatDefaults, googlechatDefaults); + + expect(ui.typeSelector.get()).toHaveTextContent('Google Hangouts Chat'); + + expect(ui.settings.googlechat.url.get()).toBeDisabled(); + expect(ui.settings.googlechat.url.get()).toHaveValue('configured'); + + await user.click(ui.settings.googlechat.optionalSettings.get()); + + expect(ui.settings.googlechat.title.input.get()).toHaveValue('Original Title'); + expect(ui.settings.googlechat.message.input.get()).toHaveValue('Original Message'); + + // Switch to a different type + await clickSelectOption(ui.typeSelector.get(), 'Webhook'); + expect(ui.typeSelector.get()).toHaveTextContent('Webhook'); + 
expect(ui.settings.webhook.url.get()).toHaveValue(''); + + // Switch back to the original type + await clickSelectOption(ui.typeSelector.get(), 'Google Hangouts Chat'); + expect(ui.typeSelector.get()).toHaveTextContent('Google Hangouts Chat'); + + // Original settings and secure fields should be restored + expect(ui.settings.googlechat.url.get()).toBeDisabled(); + expect(ui.settings.googlechat.url.get()).toHaveValue('configured'); + + expect(ui.settings.googlechat.title.input.get()).toHaveValue('Original Title'); + expect(ui.settings.googlechat.message.input.get()).toHaveValue('Original Message'); + }); + + it('should maintain secure field isolation across multiple type switches', async () => { + const googlechatDefaults: TestChannelValues = { + __id: 'id-0', + type: 'googlechat', + settings: {}, + secureFields: { url: true }, + }; + + renderForm(googlechatDefaults, googlechatDefaults); + + expect(ui.typeSelector.get()).toHaveTextContent('Google Hangouts Chat'); + expect(ui.settings.googlechat.url.get()).toBeDisabled(); + expect(ui.settings.googlechat.url.get()).toHaveValue('configured'); + + // Switch to Slack + await clickSelectOption(ui.typeSelector.get(), 'Slack'); + expect(ui.typeSelector.get()).toHaveTextContent('Slack'); + + // Slack should not have any secure fields from Google Chat + const slackUrl = ui.settings.slack.webhookUrl.get(); + expect(slackUrl).toBeEnabled(); + expect(slackUrl).toHaveValue(''); + }); +}); diff --git a/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.tsx b/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.tsx index ca939d25d38..499fd7bdaee 100644 --- a/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.tsx +++ b/public/app/features/alerting/unified/components/receivers/form/ChannelSubForm.tsx @@ -62,6 +62,7 @@ export function ChannelSubForm({ const channelFieldPath = `items.${integrationIndex}` as const; const typeFieldPath = 
`${channelFieldPath}.type` as const; const settingsFieldPath = `${channelFieldPath}.settings` as const; + const secureFieldsPath = `${channelFieldPath}.secureFields` as const; const selectedType = watch(typeFieldPath) ?? defaultValues.type; const parse_mode = watch(`${settingsFieldPath}.parse_mode`); @@ -83,10 +84,28 @@ export function ChannelSubForm({ // Restore values when switching back from a changed integration to the default one const subscription = watch((formValues, { name, type }) => { // @ts-expect-error name is valid key for formValues - const value = name ? formValues[name] : ''; + const value = name ? getValues(name, formValues) : ''; if (initialValues && name === typeFieldPath && value === initialValues.type && type === 'change') { setValue(settingsFieldPath, initialValues.settings); + setValue(secureFieldsPath, initialValues.secureFields); + } else if (name === typeFieldPath && type === 'change') { + // When switching to a new notifier, set the default settings to remove all existing settings + // from the previous notifier + const newNotifier = notifiers.find(({ dto: { type } }) => type === value); + const defaultNotifierSettings = newNotifier ? getDefaultNotifierSettings(newNotifier) : {}; + + // Not sure why, but verriding settingsFieldPath is not enough if notifiers have the same settings fields, like url, title + const currentSettings = getValues(settingsFieldPath) ?? 
{}; + Object.keys(currentSettings).forEach((key) => { + if (!defaultNotifierSettings[key]) { + setValue(`${settingsFieldPath}.${key}`, defaultNotifierSettings[key]); + } + }); + + setValue(settingsFieldPath, defaultNotifierSettings); + setValue(secureFieldsPath, {}); } + // Restore initial value of an existing oncall integration if ( initialValues && @@ -98,7 +117,19 @@ export function ChannelSubForm({ }); return () => subscription.unsubscribe(); - }, [selectedType, initialValues, setValue, settingsFieldPath, typeFieldPath, watch]); + }, [ + selectedType, + initialValues, + setValue, + settingsFieldPath, + typeFieldPath, + secureFieldsPath, + getValues, + watch, + defaultValues.settings, + defaultValues.secureFields, + notifiers, + ]); const onResetSecureField = (key: string) => { // formSecureFields might not be up to date if this function is called multiple times in a row @@ -294,6 +325,16 @@ export function ChannelSubForm({ ); } +function getDefaultNotifierSettings(notifier: Notifier): Record { + const defaultSettings: Record = {}; + notifier.dto.options.forEach((option) => { + if (option.defaultValue?.value) { + defaultSettings[option.propertyName] = option.defaultValue?.value; + } + }); + return defaultSettings; +} + const getStyles = (theme: GrafanaTheme2) => ({ buttons: css({ '& > * + *': { diff --git a/public/app/features/alerting/unified/components/receivers/form/GrafanaReceiverForm.test.tsx b/public/app/features/alerting/unified/components/receivers/form/GrafanaReceiverForm.test.tsx index af23e5dc423..e07d314f6a0 100644 --- a/public/app/features/alerting/unified/components/receivers/form/GrafanaReceiverForm.test.tsx +++ b/public/app/features/alerting/unified/components/receivers/form/GrafanaReceiverForm.test.tsx @@ -387,7 +387,7 @@ describe('GrafanaReceiverForm', () => { await user.click(newIntegrationRadio.get()); expect(newIntegrationRadio.get()).toBeChecked(); - await user.type(ui.newOnCallIntegrationName.get(), 'emea-oncall'); + await user.type(await 
ui.newOnCallIntegrationName.find(), 'emea-oncall'); // eslint-disable-next-line testing-library/no-node-access expect(ui.integrationType.get().closest('form')).toHaveFormValues({ diff --git a/public/app/features/alerting/unified/components/receivers/form/fields/OptionField.tsx b/public/app/features/alerting/unified/components/receivers/form/fields/OptionField.tsx index 955d0bae209..d965fafb40d 100644 --- a/public/app/features/alerting/unified/components/receivers/form/fields/OptionField.tsx +++ b/public/app/features/alerting/unified/components/receivers/form/fields/OptionField.tsx @@ -1,5 +1,5 @@ import { css } from '@emotion/css'; -import { FC, useEffect } from 'react'; +import { FC } from 'react'; import { Controller, DeepMap, FieldError, useFormContext } from 'react-hook-form'; import { GrafanaTheme2 } from '@grafana/data'; @@ -116,7 +116,7 @@ const OptionInput: FC = ({ getOptionMeta, }) => { const styles = useStyles2(getStyles); - const { control, register, unregister, setValue } = useFormContext(); + const { control, register, setValue } = useFormContext(); const optionMeta = getOptionMeta?.(option); @@ -125,14 +125,6 @@ const OptionInput: FC = ({ const secureFieldKey = option.secure && option.secureFieldKey ? 
option.secureFieldKey : ''; const isEncryptedInput = secureFieldKey && secureFields?.[secureFieldKey]; - // workaround for https://github.com/react-hook-form/react-hook-form/issues/4993#issuecomment-829012506 - useEffect( - () => () => { - unregister(name, { keepValue: false }); - }, - [unregister, name] - ); - const useTemplates = option.placeholder.includes('{{ template'); function onSelectTemplate(template: string) { diff --git a/public/app/features/alerting/unified/components/rule-editor/alert-rule-form/simplifiedRouting/contactPoint/ContactPointSelector.tsx b/public/app/features/alerting/unified/components/rule-editor/alert-rule-form/simplifiedRouting/contactPoint/ContactPointSelector.tsx index c512615ce78..a71930c8b88 100644 --- a/public/app/features/alerting/unified/components/rule-editor/alert-rule-form/simplifiedRouting/contactPoint/ContactPointSelector.tsx +++ b/public/app/features/alerting/unified/components/rule-editor/alert-rule-form/simplifiedRouting/contactPoint/ContactPointSelector.tsx @@ -3,10 +3,12 @@ import { isEmpty } from 'lodash'; import { useEffect } from 'react'; import { Controller, useFormContext } from 'react-hook-form'; +import { base64UrlEncode } from '@grafana/alerting'; import { ContactPointSelector as GrafanaManagedContactPointSelector, alertingAPI } from '@grafana/alerting/unstable'; import { Trans, t } from '@grafana/i18n'; import { Field, FieldValidationMessage, Stack, TextLink } from '@grafana/ui'; import { RuleFormValues } from 'app/features/alerting/unified/types/rule-form'; +import { stringifyFieldSelector } from 'app/features/alerting/unified/utils/k8s/utils'; import { createRelativeUrl } from 'app/features/alerting/unified/utils/url'; export interface ContactPointSelectorProps { @@ -21,9 +23,13 @@ export function ContactPointSelector({ alertManager }: ContactPointSelectorProps // check if the contact point still exists, we'll use listReceiver to check if the contact point exists because getReceiver doesn't work with // 
contact point titles but with UUIDs (which is not what we store on the alert rule definition) - const { currentData, status } = alertingAPI.endpoints.listReceiver.useQuery({ - fieldSelector: `spec.title=${contactPointInForm}`, - }); + const encodedContactPoint = contactPointInForm ? base64UrlEncode(contactPointInForm) : ''; + const { currentData, status } = alertingAPI.endpoints.listReceiver.useQuery( + { + fieldSelector: stringifyFieldSelector([['metadata.name', encodedContactPoint]]), + }, + { skip: !contactPointInForm } + ); const contactPointNotFound = contactPointInForm && status === QueryStatus.fulfilled && isEmpty(currentData?.items); @@ -37,6 +43,7 @@ export function ContactPointSelector({ alertManager }: ContactPointSelectorProps return ( diff --git a/public/app/features/alerting/unified/components/rule-editor/notificaton-preview/ContactPointGroup.tsx b/public/app/features/alerting/unified/components/rule-editor/notificaton-preview/ContactPointGroup.tsx index ba69db35766..9d4510a2ac1 100644 --- a/public/app/features/alerting/unified/components/rule-editor/notificaton-preview/ContactPointGroup.tsx +++ b/public/app/features/alerting/unified/components/rule-editor/notificaton-preview/ContactPointGroup.tsx @@ -3,6 +3,7 @@ import { PropsWithChildren, ReactNode } from 'react'; import Skeleton from 'react-loading-skeleton'; import { useToggle } from 'react-use'; +import { base64UrlEncode } from '@grafana/alerting'; import { alertingAPI, getContactPointDescription } from '@grafana/alerting/unstable'; import { GrafanaTheme2 } from '@grafana/data'; import { Trans, t } from '@grafana/i18n'; @@ -23,8 +24,10 @@ interface ContactPointGroupProps extends PropsWithChildren { export function GrafanaContactPointGroup({ name, matchedInstancesCount, children }: ContactPointGroupProps) { // find receiver by name – since this is what we store in the alert rule definition + const encodedName = base64UrlEncode(name); + const { data, isLoading } = 
alertingAPI.endpoints.listReceiver.useQuery({ - fieldSelector: stringifyFieldSelector([['spec.title', name]]), + fieldSelector: stringifyFieldSelector([['metadata.name', encodedName]]), }); // grab the first result from the fieldSelector result diff --git a/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/QueryAndExpressionsStep.tsx b/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/QueryAndExpressionsStep.tsx index aa5e9afa9f1..844443a4dff 100644 --- a/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/QueryAndExpressionsStep.tsx +++ b/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/QueryAndExpressionsStep.tsx @@ -4,7 +4,7 @@ import { useCallback, useEffect, useMemo, useReducer, useState } from 'react'; import { Controller, useFormContext } from 'react-hook-form'; import { useEffectOnce } from 'react-use'; -import { GrafanaTheme2, getDefaultRelativeTimeRange } from '@grafana/data'; +import { GrafanaTheme2 } from '@grafana/data'; import { selectors } from '@grafana/e2e-selectors'; import { Trans, t } from '@grafana/i18n'; import { config, getDataSourceSrv } from '@grafana/runtime'; @@ -31,7 +31,6 @@ import { } from 'app/features/expressions/types'; import { AlertQuery } from 'app/types/unified-alerting-dto'; -import { useRulesSourcesWithRuler } from '../../../hooks/useRuleSourcesWithRuler'; import { areQueriesTransformableToSimpleCondition, isExpressionQueryInAlert, @@ -100,7 +99,7 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod control, } = useFormContext(); - const { queryPreviewData, runQueries, cancelQueries, isPreviewLoading, clearPreviewData } = useAlertQueryRunner(); + const { queryPreviewData, runQueries, cancelQueries, isPreviewLoading } = useAlertQueryRunner(); const isSwitchModeEnabled = config.featureToggles.alertingQueryAndExpressionsStepMode ?? 
false; const initialState = { @@ -161,8 +160,6 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod } }, [simplifiedQueryStep, expressionQueries, isGrafanaAlertingType, setSimpleCondition]); - const { rulesSourcesWithRuler, isLoading: rulerSourcesIsLoading } = useRulesSourcesWithRuler(); - const runQueriesPreview = useCallback( (condition?: string) => { if (isCloudAlertRuleType) { @@ -303,40 +300,6 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod [runQueriesPreview, setValue, updateExpressionAndDatasource] ); - // Using dataSourcesWithRuler[0] gives incorrect types - no undefined - // Using at(0) provides a safe type with undefined - const recordingRuleDefaultDatasource = rulesSourcesWithRuler.at(0); - - useEffect(() => { - clearPreviewData(); - if (type === RuleFormType.cloudRecording) { - const expr = getValues('expression'); - - if (!recordingRuleDefaultDatasource) { - return; - } - - const datasourceUid = - (editingExistingRule && getDataSourceSrv().getInstanceSettings(dataSourceName)?.uid) || - recordingRuleDefaultDatasource.uid; - - const defaultQuery = { - refId: 'A', - datasourceUid, - queryType: '', - relativeTimeRange: getDefaultRelativeTimeRange(), - expr, - instant: true, - model: { - refId: 'A', - hide: false, - expr, - }, - }; - dispatch(setRecordingRulesQueries({ recordingRuleQueries: [defaultQuery], expression: expr })); - } - }, [type, recordingRuleDefaultDatasource, editingExistingRule, getValues, dataSourceName, clearPreviewData]); - const onDuplicateQuery = useCallback((query: AlertQuery) => { dispatch(duplicateQuery(query)); }, []); @@ -475,10 +438,7 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod } : undefined; - const canSelectDataSourceManaged = - onlyOneDSInQueries(queries) && - Boolean(rulesSourcesWithRuler.length) && - queries.some((query) => rulesSourcesWithRuler.some((source) => source.uid === query.datasourceUid)); + const 
canSelectDataSourceManaged = onlyOneDSInQueries(queries); return ( <> @@ -508,7 +468,7 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod {/* This is the PromQL Editor for recording rules */} {isRecordingRuleType && dataSourceName && ( - + )} - {rulerSourcesIsLoading && ( - - Loading data sources... - - )} - {/* This is the PromQL Editor for Cloud rules */} - {!rulerSourcesIsLoading && isCloudAlertRuleType && dataSourceName && ( + {isCloudAlertRuleType && dataSourceName && ( - + { @@ -559,7 +513,6 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod @@ -568,7 +521,7 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod )} {/* This is the editor for Grafana managed rules and Grafana managed recording rules */} - {!rulerSourcesIsLoading && isGrafanaManagedRuleByType(type) && ( + {isGrafanaManagedRuleByType(type) && ( {/* Data Queries */} diff --git a/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/SmartAlertTypeDetector.tsx b/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/SmartAlertTypeDetector.tsx index 24519be98a5..4058fa97c10 100644 --- a/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/SmartAlertTypeDetector.tsx +++ b/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/SmartAlertTypeDetector.tsx @@ -1,33 +1,25 @@ import { useFormContext } from 'react-hook-form'; -import { DataSourceInstanceSettings } from '@grafana/data'; import { Trans, t } from '@grafana/i18n'; -import { DataSourceJsonData } from '@grafana/schema'; import { RadioButtonGroup, Stack, Text } from '@grafana/ui'; import { AlertQuery } from 'app/types/unified-alerting-dto'; import { RuleFormType, RuleFormValues } from '../../../types/rule-form'; import { NeedHelpInfo } from '../NeedHelpInfo'; -import { getCanSwitch } from './utils'; +import { useGetCanSwitch } 
from './utils'; interface SmartAlertTypeDetectorProps { editingExistingRule: boolean; - rulesSourcesWithRuler: Array>; queries: AlertQuery[]; onClickSwitch: () => void; } -export function SmartAlertTypeDetector({ - editingExistingRule, - rulesSourcesWithRuler, - queries, - onClickSwitch, -}: SmartAlertTypeDetectorProps) { +export function SmartAlertTypeDetector({ editingExistingRule, queries, onClickSwitch }: SmartAlertTypeDetectorProps) { const { getValues } = useFormContext(); const [ruleFormType] = getValues(['type']); - const canSwitch = getCanSwitch({ queries, ruleFormType, rulesSourcesWithRuler }); + const canSwitch = useGetCanSwitch({ queries, ruleFormType }); const options = [ { label: t('alerting.smart-alert-type-detector.grafana-managed', 'Grafana-managed'), value: RuleFormType.grafana }, diff --git a/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/utils.ts b/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/utils.ts index 138db1979bf..acfe52dfdb1 100644 --- a/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/utils.ts +++ b/public/app/features/alerting/unified/components/rule-editor/query-and-alert-condition/utils.ts @@ -1,10 +1,9 @@ -import { DataSourceInstanceSettings } from '@grafana/data'; -import { DataSourceJsonData } from '@grafana/schema/dist/esm/index'; import { contextSrv } from 'app/core/core'; import { ExpressionDatasourceUID } from 'app/features/expressions/types'; import { AccessControlAction } from 'app/types/accessControl'; import { AlertQuery } from 'app/types/unified-alerting-dto'; +import { useHasRulerV2 } from '../../../hooks/useHasRuler'; import { RuleFormType } from '../../../types/rule-form'; export const onlyOneDSInQueries = (queries: AlertQuery[]) => { @@ -27,12 +26,10 @@ function getAvailableRuleTypes() { return { enabledRuleTypes, defaultRuleType }; } -export const getCanSwitch = ({ +export const useGetCanSwitch = ({ 
queries, ruleFormType, - rulesSourcesWithRuler, }: { - rulesSourcesWithRuler: Array>; queries: AlertQuery[]; ruleFormType: RuleFormType | undefined; }) => { @@ -41,14 +38,12 @@ export const getCanSwitch = ({ // check if we have only one query in queries and if it's a cloud datasource const onlyOneDS = onlyOneDSInQueries(queries); - const dataSourceIdFromQueries = queries[0]?.datasourceUid ?? ''; const isRecordingRuleType = ruleFormType === RuleFormType.cloudRecording; + const dataSourceIdFromQueries = queries[0]?.datasourceUid ?? ''; + const { hasRuler } = useHasRulerV2(dataSourceIdFromQueries); //let's check if we switch to cloud type - const canSwitchToCloudRule = - !isRecordingRuleType && - onlyOneDS && - rulesSourcesWithRuler.some((dsJsonData) => dsJsonData.uid === dataSourceIdFromQueries); + const canSwitchToCloudRule = !isRecordingRuleType && onlyOneDS && hasRuler; const canSwitchToGrafanaRule = !isRecordingRuleType; // check for enabled types diff --git a/public/app/features/alerting/unified/components/rule-viewer/ContactPointLink.tsx b/public/app/features/alerting/unified/components/rule-viewer/ContactPointLink.tsx index b3c9196e664..741708860e0 100644 --- a/public/app/features/alerting/unified/components/rule-viewer/ContactPointLink.tsx +++ b/public/app/features/alerting/unified/components/rule-viewer/ContactPointLink.tsx @@ -1,9 +1,11 @@ import { ComponentProps } from 'react'; import Skeleton from 'react-loading-skeleton'; +import { base64UrlEncode } from '@grafana/alerting'; import { alertingAPI } from '@grafana/alerting/unstable'; import { TextLink } from '@grafana/ui'; +import { stringifyFieldSelector } from '../../utils/k8s/utils'; import { makeEditContactPointLink } from '../../utils/misc'; interface ContactPointLinkProps extends Omit, 'href' | 'children'> { @@ -11,9 +13,11 @@ interface ContactPointLinkProps extends Omit, 'h } export const ContactPointLink = ({ name, ...props }: ContactPointLinkProps) => { - // find receiver by name – since this is 
what we store in the alert rule definition + const encodedName = base64UrlEncode(name); + + // find receiver by name using metadata.name field selector const { currentData, isLoading, isSuccess } = alertingAPI.endpoints.listReceiver.useQuery({ - fieldSelector: `spec.title=${name}`, + fieldSelector: stringifyFieldSelector([['metadata.name', encodedName]]), }); // grab the first result from the fieldSelector result diff --git a/public/app/features/alerting/unified/components/rule-viewer/RuleViewer.tsx b/public/app/features/alerting/unified/components/rule-viewer/RuleViewer.tsx index 9d50aa13531..80352b8e802 100644 --- a/public/app/features/alerting/unified/components/rule-viewer/RuleViewer.tsx +++ b/public/app/features/alerting/unified/components/rule-viewer/RuleViewer.tsx @@ -342,7 +342,7 @@ const PrometheusConsistencyCheck = withErrorBoundary( ({ ruleIdentifier }: PrometheusConsistencyCheckProps) => { const [ref, { width }] = useMeasure(); - const { hasRuler } = useHasRulerV2(ruleIdentifierToRuleSourceIdentifier(ruleIdentifier)); + const { hasRuler } = useHasRulerV2(ruleIdentifierToRuleSourceIdentifier(ruleIdentifier).uid); const { result: ruleLocation } = useRuleLocation(ruleIdentifier); const { waitForGroupConsistency, groupConsistent } = useRuleGroupConsistencyCheck(); diff --git a/public/app/features/alerting/unified/hooks/useHasRuler.ts b/public/app/features/alerting/unified/hooks/useHasRuler.ts index 2720ad7121d..b1e98889222 100644 --- a/public/app/features/alerting/unified/hooks/useHasRuler.ts +++ b/public/app/features/alerting/unified/hooks/useHasRuler.ts @@ -1,4 +1,4 @@ -import { RulesSource, RulesSourceIdentifier } from 'app/types/unified-alerting'; +import { GrafanaRulesSourceSymbol, RulesSource } from 'app/types/unified-alerting'; import { featureDiscoveryApi } from '../api/featureDiscoveryApi'; import { getRulesSourceName } from '../utils/datasource'; @@ -16,8 +16,8 @@ export function useHasRuler(rulesSource: RulesSource) { return { hasRuler, 
rulerConfig: dsFeatures?.rulerConfig }; } -export function useHasRulerV2(rulesSource: RulesSourceIdentifier) { - const { currentData: dsFeatures } = useDiscoverDsFeaturesQuery({ uid: rulesSource.uid }); +export function useHasRulerV2(ruleUid: string | typeof GrafanaRulesSourceSymbol) { + const { currentData: dsFeatures } = useDiscoverDsFeaturesQuery({ uid: ruleUid }); const hasRuler = Boolean(dsFeatures?.rulerConfig); return { hasRuler, rulerConfig: dsFeatures?.rulerConfig }; diff --git a/public/app/features/alerting/unified/mocks/server/handlers/k8s/timeIntervals.k8s.ts b/public/app/features/alerting/unified/mocks/server/handlers/k8s/timeIntervals.k8s.ts index 126423077d7..84503c2ce13 100644 --- a/public/app/features/alerting/unified/mocks/server/handlers/k8s/timeIntervals.k8s.ts +++ b/public/app/features/alerting/unified/mocks/server/handlers/k8s/timeIntervals.k8s.ts @@ -1,9 +1,10 @@ import { HttpResponse, http } from 'msw'; +import { base64UrlEncode } from '@grafana/alerting'; import { filterBySelector } from 'app/features/alerting/unified/mocks/server/handlers/k8s/utils'; import { ALERTING_API_SERVER_BASE_URL, getK8sResponse } from 'app/features/alerting/unified/mocks/server/utils'; import { ComGithubGrafanaGrafanaPkgApisAlertingNotificationsV0Alpha1TimeInterval } from 'app/features/alerting/unified/openapi/timeIntervalsApi.gen'; -import { PROVENANCE_ANNOTATION, PROVENANCE_NONE } from 'app/features/alerting/unified/utils/k8s/constants'; +import { K8sAnnotations, PROVENANCE_NONE } from 'app/features/alerting/unified/utils/k8s/constants'; /** UID of a time interval that we expect to follow all happy paths within tests/mocks */ export const TIME_INTERVAL_UID_HAPPY_PATH = 'f4eae7a4895fa786'; @@ -20,9 +21,9 @@ const allTimeIntervals = getK8sResponse ); } - // Rudimentary filter support for `spec.name` + // Rudimentary filter support for `metadata.name` const url = new URL(request.url); const fieldSelector = url.searchParams.get('fieldSelector'); - if (fieldSelector 
&& fieldSelector.includes('spec.name')) { + if (fieldSelector && fieldSelector.includes('metadata.name')) { const filteredItems = filterBySelector(allTimeIntervals.items, fieldSelector); return HttpResponse.json({ items: filteredItems }); diff --git a/public/app/features/alerting/unified/mocks/server/utils.ts b/public/app/features/alerting/unified/mocks/server/utils.ts index 5f01e038225..0ff0618a5f2 100644 --- a/public/app/features/alerting/unified/mocks/server/utils.ts +++ b/public/app/features/alerting/unified/mocks/server/utils.ts @@ -1,5 +1,6 @@ import { DefaultBodyType, HttpResponse, HttpResponseResolver, PathParams } from 'msw'; +import { base64UrlEncode } from '@grafana/alerting'; import { PromRuleGroupDTO, PromRulesResponse } from 'app/types/unified-alerting-dto'; /** Helper method to help generate a kubernetes-style response with a list of items */ @@ -20,7 +21,7 @@ export function paginatedHandlerFor( ): HttpResponseResolver { const orderedGroupsWithCursor = groups.map((group) => ({ ...group, - id: Buffer.from(`${group.file}-${group.name}`).toString('base64url'), + id: base64UrlEncode(`${group.file}-${group.name}`), })); return ({ request }) => { diff --git a/public/app/features/alerting/unified/utils/k8s/utils.ts b/public/app/features/alerting/unified/utils/k8s/utils.ts index e75818cde66..8aecb9dd46b 100644 --- a/public/app/features/alerting/unified/utils/k8s/utils.ts +++ b/public/app/features/alerting/unified/utils/k8s/utils.ts @@ -52,5 +52,7 @@ export const encodeFieldSelector = (value: string): string => { type FieldSelector = [string, string] | [string, string, '=' | '!=']; export const stringifyFieldSelector = (fieldSelectors: FieldSelector[]): string => { - return fieldSelectors.map(([key, value, operator = '=']) => `${key}${operator}${value}`).join(','); + return fieldSelectors + .map(([key, value, operator = '=']) => `${key}${operator}${encodeFieldSelector(value)}`) + .join(','); }; diff --git a/public/app/features/auth-config/constants.ts 
b/public/app/features/auth-config/constants.ts index 954f35e7be5..3a98f08ba18 100644 --- a/public/app/features/auth-config/constants.ts +++ b/public/app/features/auth-config/constants.ts @@ -9,7 +9,7 @@ export const UIMap: Record = { google: ['google', 'Google'], generic_oauth: ['lock', 'Generic OAuth'], grafana_com: ['grafana', 'Grafana.com'], - azuread: ['microsoft', 'Azure AD'], + azuread: ['microsoft', 'Entra ID'], okta: ['okta', 'Okta'], scim: ['scim', 'SCIM'], }; diff --git a/public/app/features/auth-config/fields.tsx b/public/app/features/auth-config/fields.tsx index 029e67b581a..96bb2e3ccea 100644 --- a/public/app/features/auth-config/fields.tsx +++ b/public/app/features/auth-config/fields.tsx @@ -906,7 +906,7 @@ export function fieldMap(provider: string): Record { label: t('auth-config.fields.domain-hint-label', 'Domain hint'), description: t( 'auth-config.fields.domain-hint-description', - 'Parameter to indicate the realm of the user in the Azure AD/Entra ID tenant and streamline the login process.' + 'Parameter to indicate the realm of the user in the Entra ID tenant and streamline the login process.' 
), type: 'text', validation: { diff --git a/public/app/features/browse-dashboards/BrowseDashboardsPage.tsx b/public/app/features/browse-dashboards/BrowseDashboardsPage.tsx index c658fbaa6cd..bc9572efdbd 100644 --- a/public/app/features/browse-dashboards/BrowseDashboardsPage.tsx +++ b/public/app/features/browse-dashboards/BrowseDashboardsPage.tsx @@ -206,7 +206,13 @@ const BrowseDashboardsPage = memo(({ queryParams }: { queryParams: Record ) : ( - + ) } diff --git a/public/app/features/browse-dashboards/components/BrowseView.tsx b/public/app/features/browse-dashboards/components/BrowseView.tsx index 1c98b2d440a..19b6ee383fc 100644 --- a/public/app/features/browse-dashboards/components/BrowseView.tsx +++ b/public/app/features/browse-dashboards/components/BrowseView.tsx @@ -33,9 +33,10 @@ interface BrowseViewProps { width: number; folderUID: string | undefined; permissions: BrowseDashboardsPermissions; + isReadOnlyRepo?: boolean; } -export function BrowseView({ folderUID, width, height, permissions }: BrowseViewProps) { +export function BrowseView({ folderUID, width, height, permissions, isReadOnlyRepo }: BrowseViewProps) { const status = useBrowseLoadingStatus(folderUID); const dispatch = useDispatch(); const flatTree = useFlatTreeState(folderUID); @@ -163,6 +164,7 @@ export function BrowseView({ folderUID, width, height, permissions }: BrowseView href={folderUID ? 
`dashboard/new?folderUid=${folderUID}` : 'dashboard/new'} icon="plus" size="lg" + disabled={isReadOnlyRepo} > Create dashboard @@ -173,7 +175,7 @@ export function BrowseView({ folderUID, width, height, permissions }: BrowseView : t('browse-dashboards.empty-state.title', "You haven't created any dashboards yet") } > - {folderUID && ( + {folderUID && !isReadOnlyRepo && ( Add/move dashboards to your folder at{' '} diff --git a/public/app/features/browse-dashboards/components/FolderActionsButton.tsx b/public/app/features/browse-dashboards/components/FolderActionsButton.tsx index dd88bf2679d..ec093e3aa69 100644 --- a/public/app/features/browse-dashboards/components/FolderActionsButton.tsx +++ b/public/app/features/browse-dashboards/components/FolderActionsButton.tsx @@ -7,7 +7,9 @@ import { Button, Drawer, Dropdown, Icon, Menu, MenuItem } from '@grafana/ui'; import { Permissions } from 'app/core/components/AccessControl'; import { appEvents } from 'app/core/core'; import { RepoType } from 'app/features/provisioning/Wizard/types'; +import { BulkMoveProvisionedResource } from 'app/features/provisioning/components/BulkActions/BulkMoveProvisionedResource'; import { DeleteProvisionedFolderForm } from 'app/features/provisioning/components/Folders/DeleteProvisionedFolderForm'; +import { useIsProvisionedInstance } from 'app/features/provisioning/hooks/useIsProvisionedInstance'; import { getReadOnlyTooltipText } from 'app/features/provisioning/utils/repository'; import { ShowModalReactEvent } from 'app/types/events'; import { FolderDTO } from 'app/types/folders'; @@ -29,14 +31,18 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) const [isOpen, setIsOpen] = useState(false); const [showPermissionsDrawer, setShowPermissionsDrawer] = useState(false); const [showDeleteProvisionedFolderDrawer, setShowDeleteProvisionedFolderDrawer] = useState(false); + const [showMoveProvisionedFolderDrawer, setShowMoveProvisionedFolderDrawer] = useState(false); const 
[moveFolder] = useMoveFolderMutationFacade(); + const isProvisionedInstance = useIsProvisionedInstance(); const deleteFolder = useDeleteFolderMutationFacade(); const { canEditFolders, canDeleteFolders, canViewPermissions, canSetPermissions } = getFolderPermissions(folder); const isProvisionedFolder = folder.managedBy === ManagerKind.Repo; + // When its single provisioned folder, cannot move the root repository folder + const isProvisionedRootFolder = isProvisionedFolder && !isProvisionedInstance && folder.parentUid === undefined; // Can only move folders when the folder is not provisioned - const canMoveFolder = canEditFolders && !isProvisionedFolder; + const canMoveFolder = canEditFolders && !isProvisionedRootFolder; const onMove = async (destinationUID: string) => { await moveFolder({ folderUID: folder.uid, destinationUID: destinationUID }); @@ -115,6 +121,10 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) setShowDeleteProvisionedFolderDrawer(true); }; + const handleShowMoveProvisionedFolderDrawer = () => { + setShowMoveProvisionedFolderDrawer(true); + }; + const managePermissionsLabel = t('browse-dashboards.folder-actions-button.manage-permissions', 'Manage permissions'); const moveLabel = t('browse-dashboards.folder-actions-button.move', 'Move'); const deleteLabel = t('browse-dashboards.folder-actions-button.delete', 'Delete'); @@ -122,7 +132,12 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props) const menu = ( {canViewPermissions && setShowPermissionsDrawer(true)} label={managePermissionsLabel} />} - {canMoveFolder && !isReadOnlyRepo && } + {canMoveFolder && !isReadOnlyRepo && ( + + )} {canDeleteFolders && !isReadOnlyRepo && ( )} + {showMoveProvisionedFolderDrawer && ( + setShowMoveProvisionedFolderDrawer(false)} + > + setShowMoveProvisionedFolderDrawer(false)} + /> + + )} ); } diff --git a/public/app/features/commandPalette/actions/recentScopesActions.ts 
b/public/app/features/commandPalette/actions/recentScopesActions.ts index 5afbf3fd115..99687d837d3 100644 --- a/public/app/features/commandPalette/actions/recentScopesActions.ts +++ b/public/app/features/commandPalette/actions/recentScopesActions.ts @@ -16,12 +16,20 @@ export function getRecentScopesActions(): CommandPaletteAction[] { const recentScopes = scopesSelectorService.getRecentScopes(); return recentScopes.map((recentScope) => { + const names = recentScope.map((scope) => scope.spec.title).join(', '); + const keywords = recentScope + .map((scope) => `${scope.spec.title} ${scope.metadata.name}`) + .concat(names) + .join(' '); return { - id: recentScope.map((scope) => scope.spec.title).join(', '), - name: recentScope.map((scope) => scope.spec.title).join(', '), - section: t('command-palette.section.recent-scopes', 'Recent scopes'), - // Only show the parent of the first scope for now + id: names, + name: names, + section: { + name: t('command-palette.section.recent-scopes', 'Recent scopes'), + priority: RECENT_SCOPES_PRIORITY, + }, subtitle: recentScope[0]?.parentNode?.spec.title, + keywords: keywords, priority: RECENT_SCOPES_PRIORITY, perform: () => { scopesSelectorService.changeScopes(recentScope.map((scope) => scope.metadata.name)); diff --git a/public/app/features/commandPalette/actions/scopeActions.test.tsx b/public/app/features/commandPalette/actions/scopeActions.test.tsx index 093ac40b1c7..0d828d66a81 100644 --- a/public/app/features/commandPalette/actions/scopeActions.test.tsx +++ b/public/app/features/commandPalette/actions/scopeActions.test.tsx @@ -230,6 +230,7 @@ describe('useRegisterScopesActions', () => { // The main difference here is that we map it to a parent if we are in the "scopes" section of the cmdK. // In the previous test the scope actions were mapped to global level to show correctly. 
parent: 'scopes', + subtitle: 'some parent', }, ]; diff --git a/public/app/features/commandPalette/actions/scopeActions.tsx b/public/app/features/commandPalette/actions/scopeActions.tsx index 724a3d49d46..783ec7b1b35 100644 --- a/public/app/features/commandPalette/actions/scopeActions.tsx +++ b/public/app/features/commandPalette/actions/scopeActions.tsx @@ -139,7 +139,7 @@ function useScopesRow(onApply: () => void) { * @param parentId */ function useGlobalScopesSearch(searchQuery: string, parentId?: string | null) { - const { selectScope, searchAllNodes } = useScopeServicesState(); + const { selectScope, searchAllNodes, getScopeNodes } = useScopeServicesState(); const [actions, setActions] = useState(undefined); const searchQueryRef = useRef(); @@ -151,19 +151,42 @@ function useGlobalScopesSearch(searchQuery: string, parentId?: string | null) { if (searchQueryRef.current === searchQuery) { // Only show leaf nodes because otherwise there are issues with navigating to a category without knowing // where in the tree it is. 
- const leafNodes = nodes.filter((node) => node.spec.nodeType === 'leaf'); - const actions = [getScopesParentAction()]; - for (const node of leafNodes) { - actions.push(mapScopeNodeToAction(node, selectScope, parentId || undefined)); + + const parentNodesMap = new Map(); + + if (config.featureToggles.useMultipleScopeNodesEndpoint) { + // Make sure we only request unqiue parent node names + const uniqueParentNodeNames = [ + ...new Set(nodes.map((node) => node.spec.parentName).filter((name) => name !== undefined)), + ]; + getScopeNodes(uniqueParentNodeNames).then((parentNodes) => { + for (const parentNode of parentNodes) { + parentNodesMap.set(parentNode.metadata.name, parentNode.spec.title); + } + + const leafNodes = nodes.filter((node) => node.spec.nodeType === 'leaf'); + const actions = [getScopesParentAction()]; + for (const node of leafNodes) { + const parentName = parentNodesMap.get(node.spec.parentName); + actions.push(mapScopeNodeToAction(node, selectScope, parentId || undefined, parentName || undefined)); + } + setActions(actions); + }); + } else { + const leafNodes = nodes.filter((node) => node.spec.nodeType === 'leaf'); + const actions = [getScopesParentAction()]; + for (const node of leafNodes) { + actions.push(mapScopeNodeToAction(node, selectScope, parentId || undefined)); + } + setActions(actions); } - setActions(actions); } }); } else { searchQueryRef.current = undefined; setActions(undefined); } - }, [searchAllNodes, searchQuery, parentId, selectScope]); + }, [searchAllNodes, searchQuery, parentId, selectScope, getScopeNodes]); return actions; } diff --git a/public/app/features/commandPalette/actions/scopesUtils.test.ts b/public/app/features/commandPalette/actions/scopesUtils.test.ts index 0fe22c0f0b3..411497240fc 100644 --- a/public/app/features/commandPalette/actions/scopesUtils.test.ts +++ b/public/app/features/commandPalette/actions/scopesUtils.test.ts @@ -30,6 +30,7 @@ describe('mapScopeNodeToAction', () => { priority: SCOPES_PRIORITY, parent: 
'parent1', perform: expect.any(Function), + subtitle: 'Parent Scope', }); }); @@ -43,6 +44,7 @@ describe('mapScopeNodeToAction', () => { keywords: 'Scope 1 scope1', priority: SCOPES_PRIORITY, parent: 'parent1', + subtitle: 'Parent Scope', }); // Non-leaf nodes don't have a perform function diff --git a/public/app/features/commandPalette/actions/scopesUtils.ts b/public/app/features/commandPalette/actions/scopesUtils.ts index 65c27a0c0c1..2848569247a 100644 --- a/public/app/features/commandPalette/actions/scopesUtils.ts +++ b/public/app/features/commandPalette/actions/scopesUtils.ts @@ -18,6 +18,7 @@ export function useScopeServicesState() { selectScope: () => {}, resetSelection: () => {}, searchAllNodes: () => Promise.resolve([]), + getScopeNodes: (_: string[]) => Promise.resolve([]), apply: () => {}, deselectScope: () => {}, nodes: {}, @@ -31,7 +32,7 @@ export function useScopeServicesState() { }, }; } - const { updateNode, filterNode, selectScope, resetSelection, searchAllNodes, deselectScope, apply } = + const { updateNode, filterNode, selectScope, resetSelection, searchAllNodes, deselectScope, apply, getScopeNodes } = services.scopesSelectorService; const selectorServiceState: ScopesSelectorServiceState | undefined = useObservable( services.scopesSelectorService.stateObservable ?? new Observable(), @@ -39,6 +40,7 @@ export function useScopeServicesState() { ); return { + getScopeNodes, filterNode, updateNode, selectScope, @@ -90,7 +92,12 @@ export function mapScopesNodesTreeToActions( if (child.spec.nodeType === 'leaf' && scopeIsSelected) { continue; } - let action = mapScopeNodeToAction(child, selectScope, parentId); + let action = mapScopeNodeToAction( + child, + selectScope, + parentId, + child.spec.parentName ? 
nodes[child.spec.parentName]?.spec.title : undefined + ); actions.push(action); traverse(childTreeNode, action.id); } @@ -110,13 +117,16 @@ export function mapScopesNodesTreeToActions( export function mapScopeNodeToAction( scopeNode: ScopeNode, selectScope: (id: string) => void, - parentId?: string + parentId?: string, + parentName?: string ): CommandPaletteAction { let action: CommandPaletteAction; + const subtitle = parentName || scopeNode.spec.parentName || undefined; if (parentId) { action = { id: `${parentId}/${scopeNode.metadata.name}`, name: scopeNode.spec.title, + subtitle: subtitle, keywords: `${scopeNode.spec.title} ${scopeNode.metadata.name}`, priority: SCOPES_PRIORITY, parent: parentId, @@ -135,7 +145,7 @@ export function mapScopeNodeToAction( keywords: `${scopeNode.spec.title} ${scopeNode.metadata.name}`, priority: SCOPES_PRIORITY, section: t('command-palette.action.scopes', 'Scopes'), - subtitle: scopeNode.spec.parentName, + subtitle: subtitle, perform: () => { selectScope(scopeNode.metadata.name); }, diff --git a/public/app/features/commandPalette/useMatches.ts b/public/app/features/commandPalette/useMatches.ts index 42a5716bd8d..3da87c25af4 100644 --- a/public/app/features/commandPalette/useMatches.ts +++ b/public/app/features/commandPalette/useMatches.ts @@ -190,10 +190,18 @@ function useInternalMatches(filtered: ActionImpl[], search: string): Match[] { const matchingIndices = fuzzySearch(haystack, throttledSearch); // Convert indices back to Match objects with proper scoring - const results: Match[] = matchingIndices.map((index, order) => ({ - action: throttledFiltered[index], - score: matchingIndices.length - order, // Higher score for better ranked matches - })); + const results: Match[] = matchingIndices.map((index, order) => { + const name = throttledFiltered[index].name; + const fullNameMatch = name.toLowerCase() === throttledSearch.toLowerCase(); + let score = matchingIndices.length - order; // Higher score for better ranked matches + if 
(fullNameMatch) { + score += 100; // Bumping for exact matches + } + return { + action: throttledFiltered[index], + score, + }; + }); return results; }, [throttledFiltered, throttledSearch]); diff --git a/public/app/features/commandPalette/values.ts b/public/app/features/commandPalette/values.ts index 2fc599254b0..281bbe38348 100644 --- a/public/app/features/commandPalette/values.ts +++ b/public/app/features/commandPalette/values.ts @@ -1,5 +1,6 @@ +// Bumped to way higher value, to give it more weight when searching +export const RECENT_SCOPES_PRIORITY = 50; export const SCOPES_PRIORITY = 8; -export const RECENT_SCOPES_PRIORITY = 7; export const RECENT_DASHBOARDS_PRIORITY = 6; export const ACTIONS_PRIORITY = 5; export const DEFAULT_PRIORITY = 4; diff --git a/public/app/features/connections/Connections.tsx b/public/app/features/connections/Connections.tsx index 6d40a294c55..4d40c5684b7 100644 --- a/public/app/features/connections/Connections.tsx +++ b/public/app/features/connections/Connections.tsx @@ -2,14 +2,19 @@ import { Navigate, Routes, Route, useLocation } from 'react-router-dom-v5-compat import { StoreState, useSelector } from 'app/types/store'; +import { isOpenSourceBuildOrUnlicenced } from '../admin/EnterpriseAuthFeaturesCard'; + import { ROUTES } from './constants'; import { AddNewConnectionPage } from './pages/AddNewConnectionPage'; +import { CacheFeatureHighlightPage } from './pages/CacheFeatureHighlightPage'; import ConnectionsHomePage from './pages/ConnectionsHomePage'; import { DataSourceDashboardsPage } from './pages/DataSourceDashboardsPage'; import { DataSourceDetailsPage } from './pages/DataSourceDetailsPage'; import { DataSourcesListPage } from './pages/DataSourcesListPage'; import { EditDataSourcePage } from './pages/EditDataSourcePage'; +import { InsightsFeatureHighlightPage } from './pages/InsightsFeatureHighlightPage'; import { NewDataSourcePage } from './pages/NewDataSourcePage'; +import { PermissionsFeatureHighlightPage } from 
'./pages/PermissionsFeatureHighlightPage'; function RedirectToAddNewConnection() { const { search } = useLocation(); @@ -27,6 +32,7 @@ function RedirectToAddNewConnection() { export default function Connections() { const navIndex = useSelector((state: StoreState) => state.navIndex); const isAddNewConnectionPageOverridden = Boolean(navIndex['standalone-plugin-page-/connections/add-new-connection']); + const shouldEnableFeatureHighlights = isOpenSourceBuildOrUnlicenced(); return ( @@ -41,6 +47,27 @@ export default function Connections() { element={} /> } /> + + {shouldEnableFeatureHighlights && ( + <> + } + /> + } + /> + } + /> + + )} + (); + useInitDataSourceSettings(uid); + + const { navId, pageNav, dataSourceHeader } = useDataSourceTabNav(pageName); + const styles = useStyles2(getStyles); + + const info = useDataSourceInfo({ + dataSourcePluginName: pageNav.dataSourcePluginName, + alertingSupported: dataSourceHeader.alertingSupported, + }); + + return ( + } + info={info} + actions={} + > + +
+
+
+ +
+

{title}

+
{header}
+
+ {items.map((item) => ( +
+ + {item} +
+ ))} +
+
+ + Create a Grafana Cloud Free account to start using data source permissions. This feature is also + available with a Grafana Enterprise license. + +
+ + + Learn about Enterprise + +
+
+ + + Create account + +

+ + After creating an account, you can easily{' '} + + migrate this instance to Grafana Cloud + {' '} + with our Migration Assistant. + +

+
+
+ {`${pageName} +
+
+
+
+ ); +} + +const getStyles = (theme: GrafanaTheme2) => ({ + container: css({ + display: 'flex', + gap: theme.spacing(4), + alignItems: 'flex-start', + [theme.breakpoints.down('lg')]: { + flexDirection: 'column', + }, + }), + content: css({ + flex: '0 0 40%', + }), + imageContainer: css({ + flex: '0 0 60%', + display: 'flex', + [theme.breakpoints.down('lg')]: { + flex: '1 1 auto', + }, + padding: `${theme.spacing(5)} 10% 0 ${theme.spacing(5)}`, + }), + image: css({ + width: '100%', + borderRadius: theme.shape.radius.default, + boxShadow: theme.shadows.z3, + }), + buttonIcon: css({ + marginRight: theme.spacing(1), + }), + badge: css({ + marginBottom: theme.spacing(1), + }), + title: css({ + marginBottom: theme.spacing(2), + marginTop: theme.spacing(2), + }), + header: css({ + color: theme.colors.text.primary, + }), + + itemsList: css({ + marginBottom: theme.spacing(3), + marginTop: theme.spacing(3), + }), + + listItem: css({ + display: 'flex', + alignItems: 'flex-start', + color: theme.colors.text.primary, + lineHeight: theme.typography.bodySmall.lineHeight, + marginBottom: theme.spacing(2), + }), + + linkButton: css({ + marginBottom: theme.spacing(2), + }), + + footer: css({ + marginBottom: theme.spacing(3), + marginTop: theme.spacing(3), + }), + + icon: css({ + marginRight: theme.spacing(1), + color: theme.colors.success.main, + }), + footNote: css({ + color: theme.colors.text.secondary, + fontSize: theme.typography.bodySmall.fontSize, + }), +}); diff --git a/public/app/features/connections/hooks/useDataSourceTabNav.ts b/public/app/features/connections/hooks/useDataSourceTabNav.ts new file mode 100644 index 00000000000..2c7231330b5 --- /dev/null +++ b/public/app/features/connections/hooks/useDataSourceTabNav.ts @@ -0,0 +1,95 @@ +import { useLocation, useParams } from 'react-router-dom-v5-compat'; + +import { NavModel, NavModelItem } from '@grafana/data'; +import { t } from '@grafana/i18n'; +import { getDataSourceSrv } from '@grafana/runtime'; +import { getNavModel 
} from 'app/core/selectors/navModel'; +import { useDataSource, useDataSourceMeta, useDataSourceSettings } from 'app/features/datasources/state/hooks'; +import { getDataSourceLoadingNav, buildNavModel, getDataSourceNav } from 'app/features/datasources/state/navModel'; +import { useGetSingle } from 'app/features/plugins/admin/state/hooks'; +import { useSelector } from 'app/types/store'; + +export function useDataSourceTabNav(pageName: string, pageIdParam?: string) { + const { uid = '' } = useParams<{ uid: string }>(); + const location = useLocation(); + const datasource = useDataSource(uid); + const dataSourceMeta = useDataSourceMeta(datasource.type); + const datasourcePlugin = useGetSingle(datasource.type); + const params = new URLSearchParams(location.search); + const pageId = pageIdParam || params.get('page'); + + const { plugin, loadError, loading } = useDataSourceSettings(); + const dsi = getDataSourceSrv()?.getInstanceSettings(uid); + const hasAlertingEnabled = Boolean(dsi?.meta?.alerting ?? false); + const isAlertManagerDatasource = dsi?.type === 'alertmanager'; + const alertingSupported = hasAlertingEnabled || isAlertManagerDatasource; + + const navIndex = useSelector((state) => state.navIndex); + const navIndexId = pageId ? 
`datasource-${pageId}-${uid}` : `datasource-${pageName}-${uid}`; + + let pageNav: NavModel = { + node: { + text: t('connections.use-data-source-settings-nav.page-nav.text.data-source-nav-node', 'Data Source Nav Node'), + }, + main: { + text: t('connections.use-data-source-settings-nav.page-nav.text.data-source-nav-node', 'Data Source Nav Node'), + }, + }; + + if (loadError) { + const node: NavModelItem = { + text: loadError, + subTitle: t('connections.use-data-source-settings-nav.node.subTitle.data-source-error', 'Data Source Error'), + icon: 'exclamation-triangle', + }; + + pageNav = { + node: node, + main: node, + }; + } + + if (loading || !plugin) { + pageNav = getNavModel(navIndex, navIndexId, getDataSourceLoadingNav(pageName)); + } + + if (!datasource.uid) { + const node: NavModelItem = { + text: t('connections.use-data-source-settings-nav.node.subTitle.data-source-error', 'Data Source Error'), + icon: 'exclamation-triangle', + }; + + pageNav = { + node: node, + main: node, + }; + } + + if (plugin) { + pageNav = getNavModel( + navIndex, + navIndexId, + getDataSourceNav(buildNavModel(datasource, plugin), pageId || pageName) + ); + } + + const connectionsPageNav = { + ...pageNav.main, + dataSourcePluginName: datasourcePlugin?.name || plugin?.meta.name || '', + active: true, + text: datasource.name || '', + subTitle: dataSourceMeta.name ? 
`Type: ${dataSourceMeta.name}` : '', + children: (pageNav.main.children || []).map((navModelItem) => ({ + ...navModelItem, + url: navModelItem.url?.replace('datasources/edit/', '/connections/datasources/edit/'), + })), + }; + + return { + navId: 'connections-datasources', + pageNav: connectionsPageNav, + dataSourceHeader: { + alertingSupported, + }, + }; +} diff --git a/public/app/features/connections/pages/CacheFeatureHighlightPage.tsx b/public/app/features/connections/pages/CacheFeatureHighlightPage.tsx new file mode 100644 index 00000000000..bb63af4e189 --- /dev/null +++ b/public/app/features/connections/pages/CacheFeatureHighlightPage.tsx @@ -0,0 +1,33 @@ +import { t } from '@grafana/i18n'; +import cacheScreenshot from 'img/cache-screenshot.png'; + +import { FeatureHighlightsTabPage } from '../components/FeatureHighlightsTabPage'; + +export function CacheFeatureHighlightPage() { + return ( + + ); +} diff --git a/public/app/features/connections/pages/InsightsFeatureHighlightPage.tsx b/public/app/features/connections/pages/InsightsFeatureHighlightPage.tsx new file mode 100644 index 00000000000..78f24a5026c --- /dev/null +++ b/public/app/features/connections/pages/InsightsFeatureHighlightPage.tsx @@ -0,0 +1,40 @@ +import { t } from '@grafana/i18n'; +import insightsScreenshot from 'img/insights-screenshot.png'; + +import { FeatureHighlightsTabPage } from '../components/FeatureHighlightsTabPage'; + +export function InsightsFeatureHighlightPage() { + return ( + + ); +} diff --git a/public/app/features/connections/pages/PermissionsFeatureHighlightPage.tsx b/public/app/features/connections/pages/PermissionsFeatureHighlightPage.tsx new file mode 100644 index 00000000000..4d5b8884b34 --- /dev/null +++ b/public/app/features/connections/pages/PermissionsFeatureHighlightPage.tsx @@ -0,0 +1,36 @@ +import { t } from '@grafana/i18n'; +import permissionsScreenshot from 'img/permissions-screenshot.png'; + +import { FeatureHighlightsTabPage } from 
'../components/FeatureHighlightsTabPage'; + +export function PermissionsFeatureHighlightPage() { + return ( + + ); +} diff --git a/public/app/features/connections/tabs/ConnectData/DataSourceTabs.test.tsx b/public/app/features/connections/tabs/ConnectData/DataSourceTabs.test.tsx new file mode 100644 index 00000000000..6a772be530b --- /dev/null +++ b/public/app/features/connections/tabs/ConnectData/DataSourceTabs.test.tsx @@ -0,0 +1,150 @@ +import { RenderResult, screen } from '@testing-library/react'; +import { Route, Routes } from 'react-router-dom-v5-compat'; +import { render } from 'test/test-utils'; + +import { LayoutModes, PluginType } from '@grafana/data'; +import { setPluginLinksHook, setPluginComponentsHook } from '@grafana/runtime'; +import { contextSrv } from 'app/core/services/context_srv'; +import * as api from 'app/features/datasources/api'; +import { getMockDataSources } from 'app/features/datasources/mocks/dataSourcesMocks'; +import { configureStore } from 'app/store/configureStore'; + +import { getPluginsStateMock } from '../../../plugins/admin/mocks/mockHelpers'; +import Connections from '../../Connections'; +import { ROUTES } from '../../constants'; +import { navIndex } from '../../mocks/store.navIndex.mock'; + +setPluginLinksHook(() => ({ links: [], isLoading: false })); +setPluginComponentsHook(() => ({ components: [], isLoading: false })); + +const mockDatasources = getMockDataSources(3); + +const renderPage = ( + path: string = ROUTES.Base, + store = configureStore({ + navIndex, + plugins: getPluginsStateMock([]), + dataSources: { + dataSources: mockDatasources, + dataSourcesCount: mockDatasources.length, + isLoadingDataSources: false, + searchQuery: '', + dataSourceTypeSearchQuery: '', + layoutMode: LayoutModes.List, + dataSource: mockDatasources[0], + dataSourceMeta: { + id: '', + name: '', + type: PluginType.panel, + info: { + author: { + name: '', + url: undefined, + }, + description: '', + links: [], + logos: { + large: '', + small: '', + 
}, + screenshots: [], + updated: '', + version: '', + }, + module: '', + baseUrl: '', + backend: true, + isBackend: true, + }, + isLoadingDataSourcePlugins: false, + plugins: [], + categories: [], + isSortAscending: true, + }, + }) +): RenderResult => { + return render( + + } /> + , + { + store, + historyOptions: { initialEntries: [path] }, + } + ); +}; + +jest.mock('@grafana/runtime', () => { + const original = jest.requireActual('@grafana/runtime'); + return { + ...original, + config: { + ...original.config, + bootData: { + user: { + orgId: 1, + timezone: 'UTC', + }, + navTree: [], + }, + featureToggles: { + ...original.config.featureToggles, + }, + datasources: {}, + defaultDatasource: '', + buildInfo: { + ...original.config.buildInfo, + edition: 'Open Source', + }, + caching: { + ...original.config.caching, + enabled: true, + }, + }, + getTemplateSrv: () => ({ + replace: (str: string) => str, + }), + getDataSourceSrv: () => { + return { + getInstanceSettings: (uid: string) => { + return { + id: uid, + uid: uid, + type: PluginType.datasource, + name: uid, + meta: { + id: uid, + name: uid, + type: PluginType.datasource, + backend: true, + isBackend: true, + }, + }; + }, + }; + }, + }; +}); + +describe('DataSourceEditTabs', () => { + beforeEach(() => { + process.env.NODE_ENV = 'test'; + (api.getDataSources as jest.Mock) = jest.fn().mockResolvedValue(mockDatasources); + (contextSrv.hasPermission as jest.Mock) = jest.fn().mockReturnValue(true); + }); + + it('should render Permissions and Insights tabs', () => { + const path = ROUTES.DataSourcesEdit.replace(':uid', mockDatasources[0].uid); + renderPage(path); + + const permissionsTab = screen.getByTestId('data-testid Tab Permissions'); + expect(permissionsTab).toBeInTheDocument(); + expect(permissionsTab).toHaveTextContent('Permissions'); + expect(permissionsTab).toHaveAttribute('href', '/connections/datasources/edit/x/permissions'); + + const insightsTab = screen.getByTestId('data-testid Tab Insights'); + 
expect(insightsTab).toBeInTheDocument(); + expect(insightsTab).toHaveTextContent('Insights'); + expect(insightsTab).toHaveAttribute('href', '/connections/datasources/edit/x/insights'); + }); +}); diff --git a/public/app/features/dashboard-scene/edit-pane/EditPaneHeader.tsx b/public/app/features/dashboard-scene/edit-pane/EditPaneHeader.tsx index e0e5a74a10a..b2a7a149715 100644 --- a/public/app/features/dashboard-scene/edit-pane/EditPaneHeader.tsx +++ b/public/app/features/dashboard-scene/edit-pane/EditPaneHeader.tsx @@ -4,7 +4,7 @@ import { GrafanaTheme2 } from '@grafana/data'; import { selectors } from '@grafana/e2e-selectors'; import { t } from '@grafana/i18n'; import { Button, Menu, Stack, Text, useStyles2, Dropdown, Icon, IconButton } from '@grafana/ui'; -import { trackDeleteDashboardElement } from 'app/features/dashboard/utils/tracking'; +import { trackDeleteDashboardElement } from 'app/features/dashboard-scene/utils/tracking'; import { EditableDashboardElement } from '../scene/types/EditableDashboardElement'; diff --git a/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.test.ts b/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.test.ts index 89e83aae420..00de1c7ea13 100644 --- a/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.test.ts +++ b/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.test.ts @@ -1,10 +1,15 @@ +import { configureStore } from '@reduxjs/toolkit'; import { advanceBy } from 'jest-date-mock'; +import { UnknownAction } from 'redux'; +import { of } from 'rxjs'; +import { createFetchResponse } from 'test/helpers/createFetchResponse'; import { BackendSrv, config, locationService, setBackendSrv } from '@grafana/runtime'; import { Spec as DashboardV2Spec, defaultSpec as defaultDashboardV2Spec, } from '@grafana/schema/dist/esm/schema/dashboard/v2'; +import { provisioningAPIv0alpha1 } from 'app/api/clients/provisioning/v0alpha1'; import store from 
'app/core/store'; import { getDashboardAPI } from 'app/features/dashboard/api/dashboard_api'; import { DashboardVersionError, DashboardWithAccessInfo } from 'app/features/dashboard/api/types'; @@ -26,18 +31,26 @@ import { UnifiedDashboardScenePageStateManager, DASHBOARD_CACHE_TTL, } from './DashboardScenePageStateManager'; +const fetchMock = jest.fn(); -// Mock the config module jest.mock('@grafana/runtime', () => { const original = jest.requireActual('@grafana/runtime'); + const originalGetBackendSrv = original.getBackendSrv; return { ...original, + getBackendSrv: () => { + const originalSrv = originalGetBackendSrv(); + return { + ...originalSrv, + fetch: fetchMock, + }; + }, config: { ...original.config, featureToggles: { ...original.config.featureToggles, - dashboardNewLayouts: false, // Default value - reloadDashboardsOnParamsChange: false, // Default value + dashboardNewLayouts: false, + reloadDashboardsOnParamsChange: false, }, datasources: { 'gdev-testdata': { @@ -81,6 +94,29 @@ jest.mock('app/features/playlist/PlaylistSrv', () => ({ }, })); +const createTestStore = () => + configureStore({ + reducer: { + [provisioningAPIv0alpha1.reducerPath]: provisioningAPIv0alpha1.reducer, + }, + middleware: (getDefaultMiddleware) => getDefaultMiddleware().concat(provisioningAPIv0alpha1.middleware), + }); + +let testStore: ReturnType; + +jest.mock('app/store/store', () => { + const actual = jest.requireActual('app/store/store'); + return { + ...actual, + dispatch: jest.fn((action: UnknownAction) => { + if (testStore) { + return testStore.dispatch(action); + } + return action; + }), + }; +}); + const setupDashboardAPI = ( d: DashboardWithAccessInfo | undefined, spy: jest.Mock, @@ -161,10 +197,13 @@ beforeEach(() => { jest.clearAllMocks(); mockDashboardLoader.loadDashboard.mockReset(); mockDashboardLoader.loadSnapshot.mockReset(); + fetchMock.mockReset(); // Reset locationService mocks locationService.getSearch = jest.fn().mockReturnValue(new URLSearchParams()); 
locationService.getSearchObject = jest.fn().mockReturnValue({}); + + testStore = createTestStore(); }); describe('DashboardScenePageStateManager v1', () => { @@ -1558,10 +1597,9 @@ describe('UnifiedDashboardScenePageStateManager', () => { describe('Provisioned dashboard', () => { it('should load a provisioned v1 dashboard', async () => { + fetchMock.mockImplementation(() => of(createFetchResponse(v1ProvisionedDashboardResource))); + const loader = new UnifiedDashboardScenePageStateManager({}); - setBackendSrv({ - get: () => Promise.resolve(v1ProvisionedDashboardResource), - } as unknown as BackendSrv); await loader.loadDashboard({ uid: 'blah-blah', route: DashboardRoutes.Provisioning }); expect(loader.state.dashboard).toBeDefined(); @@ -1572,10 +1610,9 @@ describe('UnifiedDashboardScenePageStateManager', () => { }); it('should load a provisioned v2 dashboard', async () => { + fetchMock.mockImplementation(() => of(createFetchResponse(v2ProvisionedDashboardResource))); + const loader = new UnifiedDashboardScenePageStateManager({}); - setBackendSrv({ - get: () => Promise.resolve(v2ProvisionedDashboardResource), - } as unknown as BackendSrv); await loader.loadDashboard({ uid: 'blah-blah', route: DashboardRoutes.Provisioning }); expect(loader.state.dashboard).toBeDefined(); diff --git a/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.ts b/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.ts index 27902c226c2..00c02b5d727 100644 --- a/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.ts +++ b/public/app/features/dashboard-scene/pages/DashboardScenePageStateManager.ts @@ -3,7 +3,7 @@ import { t } from '@grafana/i18n'; import { config, getBackendSrv, isFetchError, locationService } from '@grafana/runtime'; import { sceneGraph } from '@grafana/scenes'; import { Spec as DashboardV2Spec } from '@grafana/schema/dist/esm/schema/dashboard/v2'; -import { BASE_URL } from 
'app/api/clients/provisioning/v0alpha1/baseAPI'; +import { GetRepositoryFilesWithPathApiResponse, provisioningAPIv0alpha1 } from 'app/api/clients/provisioning/v0alpha1'; import { StateManagerBase } from 'app/core/services/StateManagerBase'; import { getMessageFromError, getMessageIdFromError, getStatusFromError } from 'app/core/utils/errors'; import { startMeasure, stopMeasure } from 'app/core/utils/metrics'; @@ -20,9 +20,10 @@ import { isDashboardV2Resource, isDashboardV2Spec, isV2StoredVersion } from 'app import { dashboardLoaderSrv, DashboardLoaderSrvV2 } from 'app/features/dashboard/services/DashboardLoaderSrv'; import { getDashboardSrv } from 'app/features/dashboard/services/DashboardSrv'; import { emitDashboardViewEvent } from 'app/features/dashboard/state/analyticsProcessor'; -import { trackDashboardSceneLoaded } from 'app/features/dashboard/utils/tracking'; +import { trackDashboardSceneLoaded } from 'app/features/dashboard-scene/utils/tracking'; import { playlistSrv } from 'app/features/playlist/PlaylistSrv'; import { ProvisioningPreview } from 'app/features/provisioning/types'; +import { dispatch } from 'app/store/store'; import { DashboardDataDTO, DashboardDTO, @@ -178,26 +179,46 @@ abstract class DashboardScenePageStateManagerBase const params = new URLSearchParams(window.location.search); const ref = params.get('ref') ?? undefined; // commit hash or branch - const url = `${BASE_URL}/repositories/${repo}/files/${path}`; - return getBackendSrv() - .get(url, ref ? 
{ ref } : undefined) - .then((v) => { - // Load the results from dryRun - const dryRun = v.resource.dryRun; - if (!dryRun) { - return Promise.reject('failed to read provisioned dashboard'); - } + const loadWithRef = async (refParam: string | undefined) => { + const result = await dispatch( + provisioningAPIv0alpha1.endpoints.getRepositoryFilesWithPath.initiate({ + name: repo, + path: path, + ref: refParam, + }) + ); - if (!dryRun.apiVersion.startsWith('dashboard.grafana.app')) { - return Promise.reject('unexpected resource type: ' + dryRun.apiVersion); - } + if (result && 'error' in result) { + throw result.error; + } - return this.processDashboardFromProvisioning(repo, path, dryRun, { - file: url, - ref: ref, - repo: repo, - }); + const v: GetRepositoryFilesWithPathApiResponse = structuredClone(result.data); + // Load the results from dryRun + const dryRun = v.resource.dryRun; + if (!dryRun) { + return Promise.reject('failed to read provisioned dashboard'); + } + + if (!dryRun.apiVersion.startsWith('dashboard.grafana.app')) { + return Promise.reject('unexpected resource type: ' + dryRun.apiVersion); + } + + return this.processDashboardFromProvisioning(repo, path, dryRun, { + file: v.path ?? 
'', + ref: refParam, + repo: repo, }); + }; + + try { + return await loadWithRef(ref); + } catch (err) { + // If ref is not found (404), retry without ref to default to the main branch + if (ref && isFetchError(err) && err.status === 404) { + return await loadWithRef(undefined); + } + throw err; + } } private processDashboardFromProvisioning( diff --git a/public/app/features/dashboard-scene/saving/useSaveDashboard.ts b/public/app/features/dashboard-scene/saving/useSaveDashboard.ts index 4d99b26648c..7bfc54a752b 100644 --- a/public/app/features/dashboard-scene/saving/useSaveDashboard.ts +++ b/public/app/features/dashboard-scene/saving/useSaveDashboard.ts @@ -2,7 +2,7 @@ import { useAsyncFn } from 'react-use'; import { locationUtil } from '@grafana/data'; import { t } from '@grafana/i18n'; -import { locationService, reportInteraction } from '@grafana/runtime'; +import { locationService } from '@grafana/runtime'; import { Dashboard } from '@grafana/schema'; import { Spec as DashboardV2Spec } from '@grafana/schema/dist/esm/schema/dashboard/v2'; import appEvents from 'app/core/app_events'; @@ -15,6 +15,8 @@ import { useDispatch } from 'app/types/store'; import { updateDashboardUidLastUsedDatasource } from '../../dashboard/utils/dashboard'; import { DashboardScene } from '../scene/DashboardScene'; +import { DashboardInteractions } from '../utils/interactions'; +import { trackDashboardSceneCreatedOrSaved } from '../utils/tracking'; export function useSaveDashboard(isCopy = false) { const dispatch = useDispatch(); @@ -55,25 +57,32 @@ export function useSaveDashboard(isCopy = false) { throw result.error; } - const resultData = result.data; + // result.data is readonly so spreading to allow for slug edits + const resultData: typeof result.data = { ...result.data }; + + // TODO: use slug from response once implemented + // reuse existing slug to avoid "Unsaved changes" modal after save + // due to slugify logic difference between frontend and backend + if (!result.data.slug 
&& scene.state.meta.slug) { + const slug = scene.state.meta.slug; + resultData.slug = slug; + resultData.url = `${result.data.url}/${slug}`; + } + scene.saveCompleted(saveModel, resultData, options.folderUid); // important that these happen before location redirect below appEvents.publish(new DashboardSavedEvent()); notifyApp.success(t('dashboard-scene.use-save-dashboard.message-dashboard-saved', 'Dashboard saved')); - //Update local storage dashboard to handle things like last used datasource updateDashboardUidLastUsedDatasource(resultData.uid); if (isCopy) { - reportInteraction('grafana_dashboard_copied', { - name: saveModel.title, - url: resultData.url, - }); + DashboardInteractions.dashboardCopied({ name: saveModel.title || '', url: resultData.url }); } else { - reportInteraction(`grafana_dashboard_${options.isNew ? 'created' : 'saved'}`, { - name: saveModel.title, - url: resultData.url, + trackDashboardSceneCreatedOrSaved(!!options.isNew, scene, { + name: saveModel.title || '', + url: resultData.url || '', }); } diff --git a/public/app/features/dashboard-scene/scene/DashboardScene.tsx b/public/app/features/dashboard-scene/scene/DashboardScene.tsx index 867bfc5d22c..f918af82afa 100644 --- a/public/app/features/dashboard-scene/scene/DashboardScene.tsx +++ b/public/app/features/dashboard-scene/scene/DashboardScene.tsx @@ -589,13 +589,14 @@ export class DashboardScene extends SceneObjectBase impleme this.setState({ overlay: undefined }); } - public async onStarDashboard() { + public async onStarDashboard(isStarred?: boolean) { const { meta, uid } = this.state; + isStarred = isStarred ?? 
Boolean(meta.isStarred); if (!uid) { return; } try { - const result = await getDashboardSrv().starDashboard(uid, Boolean(meta.isStarred)); + const result = await getDashboardSrv().starDashboard(uid, isStarred); this.setState({ meta: { @@ -716,6 +717,10 @@ export class DashboardScene extends SceneObjectBase impleme return this.serializer.getTrackingInformation(this); } + public getDynamicDashboardsTrackingInformation() { + return this.serializer.getDynamicDashboardsTrackingInformation(this); + } + public async onDashboardDelete() { // Need to mark it non dirty to navigate away without unsaved changes warning this.setState({ isDirty: false }); diff --git a/public/app/features/dashboard-scene/scene/NavToolbarActions.test.tsx b/public/app/features/dashboard-scene/scene/NavToolbarActions.test.tsx index 0a806693b3e..e42df99d74a 100644 --- a/public/app/features/dashboard-scene/scene/NavToolbarActions.test.tsx +++ b/public/app/features/dashboard-scene/scene/NavToolbarActions.test.tsx @@ -160,21 +160,30 @@ describe('NavToolbarActions', () => { it('should call DashboardInteractions.editButtonClicked with outlineExpanded:true if grafana.dashboard.edit-pane.outline.collapsed is undefined', async () => { setup(); await userEvent.click(await screen.findByTestId(selectors.components.NavToolbar.editDashboard.editButton)); - expect(DashboardInteractions.editButtonClicked).toHaveBeenCalledWith({ outlineExpanded: false }); + expect(DashboardInteractions.editButtonClicked).toHaveBeenCalledWith({ + dashboardUid: 'dash-1', + outlineExpanded: false, + }); }); it('should call DashboardInteractions.editButtonClicked with outlineExpanded:true if grafana.dashboard.edit-pane.outline.collapsed is false', async () => { localStorageMock.setItem('grafana.dashboard.edit-pane.outline.collapsed', 'false'); setup(); await userEvent.click(await screen.findByTestId(selectors.components.NavToolbar.editDashboard.editButton)); - expect(DashboardInteractions.editButtonClicked).toHaveBeenCalledWith({ 
outlineExpanded: true }); + expect(DashboardInteractions.editButtonClicked).toHaveBeenCalledWith({ + dashboardUid: 'dash-1', + outlineExpanded: true, + }); }); it('should call DashboardInteractions.editButtonClicked with outlineExpanded:false if grafana.dashboard.edit-pane.outline.collapsed is true', async () => { localStorageMock.setItem('grafana.dashboard.edit-pane.outline.collapsed', 'true'); setup(); await userEvent.click(await screen.findByTestId(selectors.components.NavToolbar.editDashboard.editButton)); - expect(DashboardInteractions.editButtonClicked).toHaveBeenCalledWith({ outlineExpanded: false }); + expect(DashboardInteractions.editButtonClicked).toHaveBeenCalledWith({ + dashboardUid: 'dash-1', + outlineExpanded: false, + }); }); }); }); diff --git a/public/app/features/dashboard-scene/scene/NavToolbarActions.tsx b/public/app/features/dashboard-scene/scene/NavToolbarActions.tsx index 210561d3654..4e8caac3bc6 100644 --- a/public/app/features/dashboard-scene/scene/NavToolbarActions.tsx +++ b/public/app/features/dashboard-scene/scene/NavToolbarActions.tsx @@ -321,7 +321,7 @@ export function ToolbarActions({ dashboard }: Props) { render: () => (
} > - - ) : ( - - )} -
+ {healthStatusNotReady ? ( + <> + + + Repository connecting, synchronize will be ready soon. + + + + + + + + + ) : ( + + {hasError || (checked !== undefined && isRepositoryHealthy === false) ? ( + + ) : ( + + )} + + )}
); }); diff --git a/public/app/features/provisioning/Wizard/hooks/useResourceStats.ts b/public/app/features/provisioning/Wizard/hooks/useResourceStats.ts index 4afc252e0e5..49573b4e793 100644 --- a/public/app/features/provisioning/Wizard/hooks/useResourceStats.ts +++ b/public/app/features/provisioning/Wizard/hooks/useResourceStats.ts @@ -11,24 +11,27 @@ import { useGetRepositoryFilesQuery, useGetResourceStatsQuery, } from 'app/api/clients/provisioning/v0alpha1'; +import { ManagerKind } from 'app/features/apiserver/types'; function getManagedCount(managed?: ManagerStats[]) { let totalCount = 0; // Loop through each managed repository managed?.forEach((manager) => { - // Loop through stats inside each manager and sum up the counts - manager.stats.forEach((stat) => { - if (stat.group === 'folder.grafana.app' || stat.group === 'dashboard.grafana.app') { - totalCount += stat.count; - } - }); + if (manager.kind === ManagerKind.Repo) { + // Loop through stats inside each manager and sum up the counts + manager.stats.forEach((stat) => { + if (stat.group === 'folder.grafana.app' || stat.group === 'dashboard.grafana.app') { + totalCount += stat.count; + } + }); + } }); return totalCount; } -function getResourceCount(stats?: ResourceCount[]) { +function getResourceCount(stats?: ResourceCount[], managed?: ManagerStats[]) { let counts: string[] = []; let resourceCount = 0; @@ -46,6 +49,26 @@ function getResourceCount(stats?: ResourceCount[]) { } }); + managed?.forEach((manager) => { + if (manager.kind !== ManagerKind.Repo) { + manager.stats.forEach((stat) => { + switch (stat.group) { + case 'folders': + case 'folder.grafana.app': + resourceCount += stat.count; + counts.push(t('provisioning.bootstrap-step.folders-count', '{{count}} folder', { count: stat.count })); + break; + case 'dashboard.grafana.app': + resourceCount += stat.count; + counts.push( + t('provisioning.bootstrap-step.dashboards-count', '{{count}} dashboard', { count: stat.count }) + ); + break; + } + }); + } + 
}); + return { counts, resourceCount, @@ -92,7 +115,9 @@ export function useResourceStats(repoName?: string, isLegacyStorage?: boolean, s return { // managed does not exist in response when first time connecting to a repo managedCount: getManagedCount(resourceStatsQuery.data?.managed), - unmanagedCount: getResourceCount(resourceStatsQuery.data?.unmanaged).resourceCount, + // "unmanaged" means unmanaged by git sync. it may still be managed by other means, like terraform, plugins, file provisioning, etc. + unmanagedCount: getResourceCount(resourceStatsQuery.data?.unmanaged, resourceStatsQuery.data?.managed) + .resourceCount, }; }, [resourceStatsQuery.data]); diff --git a/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboard.tsx b/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboard.tsx index 20077e04361..fca774ef84c 100644 --- a/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboard.tsx +++ b/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboard.tsx @@ -13,8 +13,7 @@ export interface SaveProvisionedDashboardProps { } export function SaveProvisionedDashboard({ drawer, changeInfo, dashboard }: SaveProvisionedDashboardProps) { - const { isNew, defaultValues, loadedFromRef, workflowOptions, readOnly, repository } = - useProvisionedDashboardData(dashboard); + const { isNew, defaultValues, workflowOptions, readOnly, repository } = useProvisionedDashboardData(dashboard); if (!defaultValues) { return null; @@ -27,7 +26,6 @@ export function SaveProvisionedDashboard({ drawer, changeInfo, dashboard }: Save changeInfo={changeInfo} isNew={isNew} defaultValues={defaultValues} - loadedFromRef={loadedFromRef} repository={repository} workflowOptions={workflowOptions} readOnly={readOnly} diff --git a/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboardForm.tsx b/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboardForm.tsx index 
a3a2b79f9c9..368076cafae 100644 --- a/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboardForm.tsx +++ b/public/app/features/provisioning/components/Dashboards/SaveProvisionedDashboardForm.tsx @@ -33,7 +33,6 @@ import { SaveProvisionedDashboardProps } from './SaveProvisionedDashboard'; export interface Props extends SaveProvisionedDashboardProps { isNew: boolean; defaultValues: ProvisionedDashboardFormData; - loadedFromRef?: string; workflowOptions: Array<{ label: string; value: string }>; readOnly: boolean; repository?: RepositoryView; @@ -45,7 +44,6 @@ export function SaveProvisionedDashboardForm({ drawer, changeInfo, isNew, - loadedFromRef, workflowOptions, readOnly, repository, diff --git a/public/app/features/provisioning/utils/getFormErrors.ts b/public/app/features/provisioning/utils/getFormErrors.ts index 7adcca8063c..3d94975a602 100644 --- a/public/app/features/provisioning/utils/getFormErrors.ts +++ b/public/app/features/provisioning/utils/getFormErrors.ts @@ -3,7 +3,7 @@ import { ErrorDetails } from 'app/api/clients/provisioning/v0alpha1'; import { WizardFormData } from '../Wizard/types'; export type RepositoryField = keyof WizardFormData['repository']; -export type RepositoryFormPath = `repository.${RepositoryField}`; +export type RepositoryFormPath = `repository.${RepositoryField}` | `repository.sync.intervalSeconds`; export type FormErrorTuple = [RepositoryFormPath | null, { message: string } | null]; /** @@ -25,7 +25,13 @@ export const getFormErrors = (errors: ErrorDetails[]): FormErrorTuple => { 'bitbucket.url', 'git.branch', 'git.url', + 'sync.intervalSeconds', ]; + + const nestedFieldMap: Record = { + 'sync.intervalSeconds': 'repository.sync.intervalSeconds', + }; + const fieldMap: Record = { path: 'repository.path', branch: 'repository.branch', @@ -37,6 +43,12 @@ export const getFormErrors = (errors: ErrorDetails[]): FormErrorTuple => { if (error.field) { const cleanField = error.field.replace('spec.', ''); if 
(fieldsToValidate.includes(cleanField)) { + // Check for direct nested field mapping first + if (cleanField in nestedFieldMap) { + return [nestedFieldMap[cleanField], { message: error.detail || `Invalid ${cleanField}` }]; + } + + // Fall back to simple field mapping for non-nested fields const fieldParts = cleanField.split('.'); const lastPart = fieldParts[fieldParts.length - 1]; diff --git a/public/app/features/query/components/QueryEditorRow.test.tsx b/public/app/features/query/components/QueryEditorRow.test.tsx index e87dd718b95..f839e570819 100644 --- a/public/app/features/query/components/QueryEditorRow.test.tsx +++ b/public/app/features/query/components/QueryEditorRow.test.tsx @@ -4,6 +4,7 @@ import { PropsWithChildren } from 'react'; import { CoreApp, DataQueryRequest, dateTime, LoadingState, PanelData, toDataFrame } from '@grafana/data'; import { DataQuery } from '@grafana/schema'; import { mockDataSource } from 'app/features/alerting/unified/mocks'; +import { ExpressionDatasourceUID } from 'app/features/expressions/types'; import { filterPanelDataToQuery, Props, QueryEditorRow } from './QueryEditorRow'; @@ -464,5 +465,28 @@ describe('QueryEditorRow', () => { expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument(); }); }); + + it('should not render saved queries buttons when query is an expression query', async () => { + const expressionQuery = { + refId: 'B', + datasource: { + uid: ExpressionDatasourceUID, + type: '__expr__', + }, + }; + + const expressionProps = { + ...props(testData), + query: expressionQuery, + queries: [expressionQuery], + }; + + render(); + + await waitFor(() => { + expect(screen.queryByText('Save query')).not.toBeInTheDocument(); + expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument(); + }); + }); }); }); diff --git a/public/app/features/query/components/QueryEditorRow.tsx b/public/app/features/query/components/QueryEditorRow.tsx index fb469d17aa7..fe8fa6c018e 100644 --- 
a/public/app/features/query/components/QueryEditorRow.tsx +++ b/public/app/features/query/components/QueryEditorRow.tsx @@ -34,6 +34,7 @@ import { } from 'app/core/components/QueryOperationRow/QueryOperationRow'; import { useQueryLibraryContext } from '../../explore/QueryLibrary/QueryLibraryContext'; +import { ExpressionDatasourceUID } from '../../expressions/types'; import { QueryActionComponent, RowActionComponents } from './QueryActionComponent'; import { QueryEditorRowHeader } from './QueryEditorRowHeader'; @@ -386,10 +387,11 @@ export class QueryEditorRow extends PureComponent - {!isEditingQueryLibrary && !isUnifiedAlerting && ( + {!isEditingQueryLibrary && !isUnifiedAlerting && !isExpressionQuery && ( scope !== undefined); } + async fetchMultipleScopeNodes(names: string[]): Promise { + if (!config.featureToggles.useMultipleScopeNodesEndpoint || names.length === 0) { + return Promise.resolve([]); + } + + try { + const res = await getBackendSrv().get<{ items: ScopeNode[] }>(apiUrl + `/find/scope_node_children`, { + names: names, + }); + return res?.items ?? []; + } catch (err) { + return []; + } + } + /** * Fetches a map of nodes based on the specified options. 
* diff --git a/public/app/features/scopes/ScopesService.ts b/public/app/features/scopes/ScopesService.ts index 5c5d4b33147..0e6f3500839 100644 --- a/public/app/features/scopes/ScopesService.ts +++ b/public/app/features/scopes/ScopesService.ts @@ -76,8 +76,8 @@ export class ScopesService implements ScopesContextValue { // Pre-load parent node, to prevent UI flickering if (parentNodeId) { - this.selectorService.getScopeNode(parentNodeId).catch((error) => { - console.error('Failed to pre-load parent node', error); + this.selectorService.resolvePathToRoot(parentNodeId, this.selectorService.state.tree!).catch((error) => { + console.error('Failed to pre-load parent node path', error); }); } @@ -94,7 +94,9 @@ export class ScopesService implements ScopesContextValue { const parentNode = queryParams.get('scope_parent'); const scopes = queryParams.getAll('scopes'); - if (scopes.length) { + // Check if new scopes are different from the old scopes + const currentScopes = this.selectorService.state.appliedScopes.map((scope) => scope.scopeId); + if (scopes.length && !isEqual(scopes, currentScopes)) { // We only update scopes but never delete them. This is to keep the scopes in memory if user navigates to // page that does not use scopes (like from dashboard to dashboard list back to dashboard). If user // changes the URL directly, it would trigger a reload so scopes would still be reset. 
@@ -105,17 +107,21 @@ export class ScopesService implements ScopesContextValue { // Update the URL based on change in the scopes state this.subscriptions.push( - selectorService.subscribeToState((state, prev) => { - const oldParentNode = prev.appliedScopes[0]?.parentNodeId; + selectorService.subscribeToState((state, prevState) => { + const oldParentNode = prevState.appliedScopes[0]?.parentNodeId; const newParentNode = state.appliedScopes[0]?.parentNodeId; - if (oldParentNode !== newParentNode && newParentNode) { - this.locationService.partial({ scope_parent: newParentNode }, true); - } - const oldScopeNames = prev.appliedScopes.map((scope) => scope.scopeId); + const parentNodeChanged = oldParentNode !== newParentNode; + + const oldScopeNames = prevState.appliedScopes.map((scope) => scope.scopeId); const newScopeNames = state.appliedScopes.map((scope) => scope.scopeId); - if (!isEqual(oldScopeNames, newScopeNames)) { - this.locationService.partial({ scopes: newScopeNames }, true); + + const scopesChanged = !isEqual(oldScopeNames, newScopeNames); + if (scopesChanged) { + this.locationService.partial( + { scopes: newScopeNames, scope_parent: parentNodeChanged ? newParentNode || null : oldParentNode }, + true + ); } }) ); diff --git a/public/app/features/scopes/selector/RecentScopes.tsx b/public/app/features/scopes/selector/RecentScopes.tsx index efd190f7ec3..66771f635fb 100644 --- a/public/app/features/scopes/selector/RecentScopes.tsx +++ b/public/app/features/scopes/selector/RecentScopes.tsx @@ -39,7 +39,9 @@ export const RecentScopes = ({ recentScopes, onSelect }: RecentScopesProps) => { recentScopes.map((recentScopeSet) => (
- -
- -
-
+ {options.filters.length > 1 && ( + +
+ +
+
+ )} {options.filters.map((filter, idx) => ( > = []; + const linkLookup = new Set(); for (const field of visibleFields) { const value = field.values[rowIndex]; const fieldDisplay = field.display ? field.display(value) : { text: `${value}`, numeric: +value }; - links.push(...getDataLinks(field, rowIndex)); + getDataLinks(field, rowIndex).forEach((link) => { + const key = `${link.title}/${link.href}`; + if (!linkLookup.has(key)) { + links.push(link); + linkLookup.add(key); + } + }); displayValues.push({ name: getFieldDisplayName(field, data), diff --git a/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json b/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json index 0d9105711ae..e5b7e6896ec 100644 --- a/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json +++ b/public/app/plugins/datasource/grafana-postgresql-datasource/plugin.json @@ -2,6 +2,7 @@ "type": "datasource", "name": "PostgreSQL", "id": "grafana-postgresql-datasource", + "executable": "gpx_grafana-postgresql-datasource", "aliasIDs": ["postgres"], "category": "sql", @@ -21,6 +22,9 @@ { "name": "Documentation", "url": "https://grafana.com/docs/grafana/latest/datasources/postgres/" } ] }, + "dependencies": { + "grafanaDependency": ">=11.6.0" + }, "alerting": true, "annotations": true, diff --git a/public/app/plugins/datasource/grafana-testdata-datasource/dataquery.ts b/public/app/plugins/datasource/grafana-testdata-datasource/dataquery.ts index 4d4a2084fc1..e7eb62da832 100644 --- a/public/app/plugins/datasource/grafana-testdata-datasource/dataquery.ts +++ b/public/app/plugins/datasource/grafana-testdata-datasource/dataquery.ts @@ -21,6 +21,7 @@ export enum TestDataQueryType { NodeGraph = 'node_graph', PredictableCSVWave = 'predictable_csv_wave', PredictablePulse = 'predictable_pulse', + QueryMeta = 'query_meta', RandomWalk = 'random_walk', RandomWalkTable = 'random_walk_table', RandomWalkWithError = 'random_walk_with_error', diff --git 
a/public/app/plugins/datasource/grafana-testdata-datasource/mocks/scenarios.ts b/public/app/plugins/datasource/grafana-testdata-datasource/mocks/scenarios.ts index 20d4178b1a4..b9c3c9df2ce 100644 --- a/public/app/plugins/datasource/grafana-testdata-datasource/mocks/scenarios.ts +++ b/public/app/plugins/datasource/grafana-testdata-datasource/mocks/scenarios.ts @@ -74,6 +74,12 @@ export const scenarios = [ name: 'Predictable Pulse', stringInput: '', }, + { + description: '', + id: TestDataQueryType.QueryMeta, + name: 'Query Metadata', + stringInput: '', + }, { description: '', id: TestDataQueryType.RandomWalk, diff --git a/public/app/plugins/datasource/graphite/datasource.ts b/public/app/plugins/datasource/graphite/datasource.ts index 0a90bc776b7..dd6915c0aaa 100644 --- a/public/app/plugins/datasource/graphite/datasource.ts +++ b/public/app/plugins/datasource/graphite/datasource.ts @@ -1029,9 +1029,15 @@ export class GraphiteDatasource }; if (config.featureToggles.graphiteBackendMode) { - const functions = await this.getResource('functions'); - this.funcDefs = gfunc.parseFuncDefs(functions); - return this.funcDefs; + try { + const functions = await this.getResource('functions'); + this.funcDefs = gfunc.parseFuncDefs(functions); + return this.funcDefs; + } catch (error) { + console.error('Fetching graphite functions error', error); + this.funcDefs = gfunc.getFuncDefs(this.graphiteVersion); + return this.funcDefs; + } } return lastValueFrom( diff --git a/public/app/plugins/datasource/influxdb/components/editor/config/ConfigEditor.tsx b/public/app/plugins/datasource/influxdb/components/editor/config/ConfigEditor.tsx index 3bb5ccc3941..7c7086b2dd3 100644 --- a/public/app/plugins/datasource/influxdb/components/editor/config/ConfigEditor.tsx +++ b/public/app/plugins/datasource/influxdb/components/editor/config/ConfigEditor.tsx @@ -8,7 +8,7 @@ import { updateDatasourcePluginJsonDataOption, } from '@grafana/data'; import { config } from '@grafana/runtime'; -import { Alert, 
DataSourceHttpSettings, InlineField, Select, Field, Input, FieldSet, TextLink } from '@grafana/ui'; +import { Alert, DataSourceHttpSettings, InlineField, Select, Field, Input, FieldSet } from '@grafana/ui'; import { BROWSER_MODE_DISABLED_MESSAGE } from '../../../constants'; import { InfluxOptions, InfluxOptionsV1, InfluxVersion } from '../../../types'; @@ -60,11 +60,6 @@ export class ConfigEditor extends PureComponent { this.htmlPrefix = uniqueId('influxdb-config'); } - versionNotice = { - Flux: 'Support for Flux in Grafana is currently in beta', - SQL: 'Support for SQL in Grafana is currently in alpha', - }; - onVersionChanged = (selected: SelectableValue) => { const { options, onOptionsChange } = this.props; @@ -126,17 +121,6 @@ export class ConfigEditor extends PureComponent { - {options.jsonData.version !== InfluxVersion.InfluxQL && ( - -

- Please report any issues to:
- - https://github.com/grafana/grafana/issues - -

-
- )} - {isDirectAccess && ( {BROWSER_MODE_DISABLED_MESSAGE} diff --git a/public/app/plugins/datasource/influxdb/datasource.test.ts b/public/app/plugins/datasource/influxdb/datasource.test.ts index 12ae898d58d..ec81ad57f1d 100644 --- a/public/app/plugins/datasource/influxdb/datasource.test.ts +++ b/public/app/plugins/datasource/influxdb/datasource.test.ts @@ -273,6 +273,15 @@ describe('interpolateQueryExpr', () => { replace: jest.fn().mockImplementation((...rest: unknown[]) => 'templateVarReplaced'), } as unknown as TemplateSrv; let ds = getMockInfluxDS(getMockDSInstanceSettings(), templateSrvStub); + + // Mock console.warn as we expect tests to use it + beforeEach(() => { + jest.spyOn(console, 'warn').mockImplementation(); + }); + afterEach(() => { + jest.restoreAllMocks(); + }); + it('should return the value as it is', () => { const value = 'normalValue'; const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build(); @@ -281,6 +290,18 @@ describe('interpolateQueryExpr', () => { expect(result).toBe(expectation); }); + it('should return the escaped value if the value wrapped in regex without !~ or =~', () => { + const value = '/special/path'; + const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build(); + const result = ds.interpolateQueryExpr( + value, + variableMock, + 'select atan(z/sqrt(3.14)), that where path /$tempVar/' + ); + const expectation = `\\/special\\/path`; + expect(result).toBe(expectation); + }); + it('should return the escaped value if the value wrapped in regex', () => { const value = '/special/path'; const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build(); diff --git a/public/app/plugins/datasource/influxdb/datasource.ts b/public/app/plugins/datasource/influxdb/datasource.ts index b073f333fcd..9fb42a6f236 100644 --- a/public/app/plugins/datasource/influxdb/datasource.ts +++ b/public/app/plugins/datasource/influxdb/datasource.ts 
@@ -360,10 +360,7 @@ export default class InfluxDatasource extends DataSourceWithBackend escapeRegex(v)).join('|')})`; + // If the value is a string array first escape them then join them with pipe + // then put inside parenthesis. + return typeof value === 'string' ? escapeRegex(value) : `(${value.map((v) => escapeRegex(v)).join('|')})`; + } catch (e) { + console.warn(`Supplied match is not valid regex: ${match}`); + } } return value; diff --git a/public/app/plugins/datasource/tempo/datasource.test.ts b/public/app/plugins/datasource/tempo/datasource.test.ts index 05c5399d8f9..737feaa4851 100644 --- a/public/app/plugins/datasource/tempo/datasource.test.ts +++ b/public/app/plugins/datasource/tempo/datasource.test.ts @@ -52,17 +52,6 @@ import { createTempoDatasource } from './test/mocks'; import { initTemplateSrv } from './test/test_utils'; import { TempoJsonData, TempoQuery } from './types'; -let mockObservable: () => Observable; -jest.mock('@grafana/runtime', () => { - return { - ...jest.requireActual('@grafana/runtime'), - getBackendSrv: () => ({ - fetch: mockObservable, - _request: mockObservable, - }), - }; -}); - describe('Tempo data source', () => { // Mock the console error so that running the test suite doesnt throw the error const origError = console.error; @@ -339,20 +328,6 @@ describe('Tempo data source', () => { expect(edgesFrame.meta?.preferredVisualisationType).toBe('nodeGraph'); }); - describe('test the testDatasource function', () => { - it('should return a success msg if response.ok is true', async () => { - mockObservable = () => of({ ok: true }); - const handleStreamingQuery = jest - .spyOn(TempoDatasource.prototype, 'handleStreamingQuery') - .mockImplementation(() => of({ data: [] })); - - const ds = new TempoDatasource(defaultSettings); - const response = await ds.testDatasource(); - expect(response.status).toBe('success'); - expect(handleStreamingQuery).toHaveBeenCalled(); - }); - }); - describe('test the metadataRequest function', () => { 
it('should return the data from getResource', async () => { const ds = new TempoDatasource(defaultSettings); diff --git a/public/app/plugins/datasource/tempo/datasource.ts b/public/app/plugins/datasource/tempo/datasource.ts index 193c6f053ff..2ded44441f6 100644 --- a/public/app/plugins/datasource/tempo/datasource.ts +++ b/public/app/plugins/datasource/tempo/datasource.ts @@ -1,5 +1,5 @@ import { groupBy } from 'lodash'; -import { EMPTY, forkJoin, from, lastValueFrom, merge, Observable, of } from 'rxjs'; +import { EMPTY, from, merge, Observable, of } from 'rxjs'; import { catchError, concatMap, finalize, map, mergeMap, toArray } from 'rxjs/operators'; import { @@ -931,86 +931,7 @@ export class TempoDatasource extends DataSourceWithBackend { - const observables = []; - - const options: BackendSrvRequest = { - headers: {}, - method: 'GET', - url: `${this.instanceSettings.url}/api/echo`, - }; - observables.push( - getBackendSrv() - .fetch(options) - .pipe( - mergeMap(() => { - return of({ status: 'success', message: 'Health check succeeded' }); - }), - catchError((err) => { - return of({ - status: 'error', - message: getErrorMessage(err?.data?.message, 'Unable to connect with Tempo'), - }); - }) - ) - ); - - if (this.streamingEnabled?.search) { - const now = new Date(); - const from = new Date(now); - from.setMinutes(from.getMinutes() - 15); - observables.push( - this.handleStreamingQuery( - { - range: { - from: dateTime(from), - to: dateTime(now), - raw: { from: 'now-15m', to: 'now' }, - }, - requestId: '', - interval: '', - intervalMs: 0, - scopedVars: {}, - targets: [], - timezone: '', - app: '', - startTime: 0, - }, - [ - { - datasource: this.instanceSettings, - limit: 1, - query: '{}', - queryType: 'traceql', - refId: 'A', - tableType: SearchTableType.Traces, - filters: [], - }, - ], - '{}' - ).pipe( - mergeMap(() => { - return of({ status: 'success', message: 'Streaming test succeeded.' 
}); - }), - catchError((err) => { - return of({ - status: 'error', - message: getErrorMessage(err?.data?.message, 'Test for streaming failed, consider disabling streaming'), - }); - }) - ) - ); - } - - return await lastValueFrom( - forkJoin(observables).pipe( - mergeMap((observableResults) => { - const erroredResult = observableResults.find((result) => result.status !== 'success'); - return erroredResult - ? of(erroredResult) - : of({ status: 'success', message: 'Successfully connected to Tempo data source.' }); - }) - ) - ); + return await super.testDatasource(); } getQueryDisplayText(query: TempoQuery) { diff --git a/public/app/plugins/panel/geomap/editor/layerEditor.tsx b/public/app/plugins/panel/geomap/editor/layerEditor.tsx index 4f70a82e587..d85a00afd4e 100644 --- a/public/app/plugins/panel/geomap/editor/layerEditor.tsx +++ b/public/app/plugins/panel/geomap/editor/layerEditor.tsx @@ -101,7 +101,9 @@ export function getLayerEditor(opts: LayerEditorOptions): NestedPanelOptions - - {({ width }) => { - if (width === 0) { - return null; - } - return ( - - ); - }} - +
); }; @@ -52,8 +42,6 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp const getStyles = (theme: GrafanaTheme2) => ({ editorBox: css({ label: 'editorBox', - border: `1px solid ${theme.colors.border.medium}`, - borderRadius: theme.shape.radius.default, margin: theme.spacing(0.5, 0), width: '100%', }), diff --git a/public/app/routes/routes.tsx b/public/app/routes/routes.tsx index 3767ed90322..80b57083042 100644 --- a/public/app/routes/routes.tsx +++ b/public/app/routes/routes.tsx @@ -364,14 +364,6 @@ export function getAppRoutes(): RouteDescriptor[] { () => import(/* webpackChunkName: "AdminEditOrgPage" */ 'app/features/admin/AdminEditOrgPage') ), }, - { - path: '/admin/featuretoggles', - component: config.featureToggles.featureToggleAdminPage - ? SafeDynamicImport( - () => import(/* webpackChunkName: "AdminFeatureTogglesPage" */ 'app/features/admin/AdminFeatureTogglesPage') - ) - : () => , - }, { path: '/admin/stats', component: SafeDynamicImport( diff --git a/public/img/cache-screenshot.png b/public/img/cache-screenshot.png new file mode 100644 index 00000000000..501b5c38f96 Binary files /dev/null and b/public/img/cache-screenshot.png differ diff --git a/public/img/insights-screenshot.png b/public/img/insights-screenshot.png new file mode 100644 index 00000000000..e5291b318d3 Binary files /dev/null and b/public/img/insights-screenshot.png differ diff --git a/public/img/permissions-screenshot.png b/public/img/permissions-screenshot.png new file mode 100644 index 00000000000..affb902fab3 Binary files /dev/null and b/public/img/permissions-screenshot.png differ diff --git a/public/locales/cs-CZ/grafana.json b/public/locales/cs-CZ/grafana.json index dc6dc3c595c..e2048107f20 100644 --- a/public/locales/cs-CZ/grafana.json +++ b/public/locales/cs-CZ/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Přístup nebyl povolen" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Některé funkce jsou stabilní (GA) a povolené 
ve výchozím nastavení, zatímco některé funkce jsou v současné době ve fázi předběžné beta verze, připravené k implementaci.", - "confirm-modal-body-2": "Před provedením úprav doporučujeme porozumět důsledkům každé změny funkce.", - "confirmText-save-changes": "Uložit změny", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Obecná dostupnost", - "deprecated": "Zastaralé", - "ga": "GA" - }, - "save-changes": "Uložit změny", - "saving": "Ukládání…", - "title-apply-feature-toggle-changes": "Použít změny přepínače funkce" - }, "admin-orgs-table": { "aria-label-delete-org": "Odstranit organizaci", "confirmText-delete": "Odstranit", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Seřadit nástěnky podle oblíbenosti ve vyhledávání", "title-team-sync": "Synchronizace týmu" }, - "feature-toggles": { - "restart-pending": "Probíhá restartování vaší instance Grafana pro použití nejnovějších změn funkce přepínače", - "restart-required": "Uložením změn funkce přepínače dojde k resetování instance, což může trvat několik minut", - "sub-title": "Zobrazit a upravit přepínače funkcí. Další informace o přepínání funkcí najdete na <2>grafana.com." - }, "get-enterprise": { "contact-us": "Kontaktujte nás a získejte bezplatnou zkušební verzi", "description": "Zkušební verzi můžete používat zdarma po dobu 30 dnů. 
Pošleme vám upozornění 5 dnů před koncem zkušební doby.", @@ -2197,7 +2178,6 @@ "body-queries-expressions-configured": "Vytvořte alespoň jeden dotaz nebo výraz, na který chcete být upozorněni", "confirmText-deactivate": "Deaktivovat", "expressions": "Výrazy", - "loading-data-sources": "Načítání zdrojů dat…", "manipulate-returned-queries-other-operations": "Spravujte data získaná z dotazů pomocí matematických a dalších operací.", "message": { "a-valid-expression-is-required": "Je vyžadován platný výraz" @@ -3284,7 +3264,7 @@ "define-allowed-teams-ids-label": "Definovat povolená ID týmů", "display-name-description": "Zobrazí se na přihlašovací stránce jako „Přihlásit se přes…“. Užitečné, pokud používáte více než jednoho poskytovatele identity nebo protokoly SSO.", "display-name-label": "Zobrazované jméno", - "domain-hint-description": "Parametr pro označení oblasti uživatele v instanci Azure AD / Entra ID a zjednodušení procesu přihlášení.", + "domain-hint-description": "", "domain-hint-label": "Nápověda k doméně", "domain-hint-valid-domain": "Toto pole musí být platná doména.", "email-attribute-name-description": "Název klíče, který se použije pro vyhledání e-mailu uživatele v mapě atributů ID tokenu OAuth2.", @@ -3540,6 +3520,7 @@ "move-modal-field-label": "Název složky", "move-modal-text": "Tato akce přesune následující obsah:", "move-modal-title": "Přesunout", + "move-provisioned-folder": "", "moving": "Probíhá přesouvání…", "new-folder-name-required-phrase": "Název složky je povinný.", "selected-mix-resources-modal-text": "Vybrali jste přidělené i nepřidělené zdroje. Tyto zdroje nelze zpracovat společně. 
Vyberte pouze přidělené nebo nepřidělené zdroje a zkuste to znovu.", @@ -8762,6 +8743,9 @@ "tooltip-hide": "Skrýt heslo", "tooltip-show": "Zobrazit heslo" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Rozbalit řádek", "collapse": "Sbalit řádek", @@ -8790,6 +8774,9 @@ "series-color-picker-popover": { "y-axis-usage": "Použít pravou osu y" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Načítání" }, @@ -10248,7 +10235,7 @@ "title": "Zdroje dat" }, "databases": { - "title": "Databáze" + "title": "" }, "datasources": { "subtitle": "Přidávejte a konfigurujte zdroje dat", @@ -11792,6 +11779,8 @@ "button-cancelling": "", "button-next": "Dokončit", "button-start": "Zahájit synchronizaci", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Tímto odstraníte konfiguraci úložiště a přijdete o veškerý pokrok. Opravdu chcete zahodit změny?", "confirm": "Ano, zahodit", @@ -12176,8 +12165,8 @@ "title": "Vyberte rozsahy" }, "tree": { - "collapse": "Sbalit", - "expand": "Rozbalit", + "collapse": "", + "expand": "", "headline": { "noResults": "Nebyly nalezeny žádné výsledky pro váš dotaz", "recommended": "Doporučeno", diff --git a/public/locales/de-DE/grafana.json b/public/locales/de-DE/grafana.json index 4446e4ecdcb..1bc8591394c 100644 --- a/public/locales/de-DE/grafana.json +++ b/public/locales/de-DE/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Zugriff verweigert" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Einige Funktionen sind stabil (GA) und standardmäßig aktiviert, während andere zurzeit in ihrer vorläufigen Beta-Phase sind und für eine frühzeitige Anwendung verfügbar sind.", - "confirm-modal-body-2": "Wir raten Ihnen, die Auswirkungen jeder Funktionsänderung zu kennen, bevor Sie Änderungen vornehmen.", - "confirmText-save-changes": "Änderungen speichern", - "get-stage-cell": { - "beta": "Beta", - 
"content-general-availability": "Allgemeine Verfügbarkeit", - "deprecated": "Veraltet", - "ga": "GA" - }, - "save-changes": "Änderungen speichern", - "saving": "Wird gespeichert …", - "title-apply-feature-toggle-changes": "Änderungen des Funktionsschalters anwenden" - }, "admin-orgs-table": { "aria-label-delete-org": "Organisation löschen", "confirmText-delete": "Löschen", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Dashboards je nach Beliebtheit beim Suchen sortieren", "title-team-sync": "Teamsynchronisierung" }, - "feature-toggles": { - "restart-pending": "Ein Neustart für Ihre Grafana-Instanz steht bevor, damit die neuesten Änderungen am Funktionsschalter angewendet werden", - "restart-required": "Das Speichern von Änderungen am Funktionsschalter führt zu einem Neustart der Instanz, der einige Minuten dauern kann", - "sub-title": "Zeige die Funktionsschalter an und bearbeite sie. Unter <2>grafana.com erfährst du mehr über Funktionsschalter." - }, "get-enterprise": { "contact-us": "Kontaktieren Sie uns, um eine kostenlose Testversion zu erhalten.", "description": "Sie können die Testversion 30 Tage lang kostenlos nutzen. Wir erinnern Sie fünf Tage vor Ablauf der Testversion nochmals.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Erstellen Sie mindestens eine Abfrage oder einen Ausdruck für eine Warnung", "confirmText-deactivate": "Deaktivieren", "expressions": "Ausdrücke", - "loading-data-sources": "Datenquellen werden geladen ...", "manipulate-returned-queries-other-operations": "Manipulieren Sie Daten, die von Abfragen mit mathematischen und anderen Operationen zurückgegeben werden.", "message": { "a-valid-expression-is-required": "Ein gültiger Ausdruck ist erforderlich" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Erlaubte Team-IDs festlegen", "display-name-description": "Wird auf der Anmeldeseite als „Anmelden mit …“ angezeigt. 
Hilfreich, wenn Sie mehr als einen Identitätsanbieter oder SSO-Protokolle nutzen.", "display-name-label": "Anzeigename", - "domain-hint-description": "Parameter zur Angabe des Bereichs des Nutzers im Azure AD/Entra ID-Tenant und zur Optimierung des Anmeldeprozesses.", + "domain-hint-description": "", "domain-hint-label": "Domain-Hinweis", "domain-hint-valid-domain": "Dieses Feld muss eine gültige Domain sein.", "email-attribute-name-description": "Name des Keys, der für die Nutzer-E-Mail-Suche in der Attributzuordnung des OAuth2-ID-Tokens verwendet wird.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Ordnername", "move-modal-text": "Diese Aktion wird folgenden Inhalt verschieben:", "move-modal-title": "Verschieben", + "move-provisioned-folder": "", "moving": "Verschieben...", "new-folder-name-required-phrase": "Ordnername ist erforderlich.", "selected-mix-resources-modal-text": "Sie haben sowohl bereitgestellte als auch nicht bereitgestellte Ressourcen ausgewählt. Diese können nicht zusammen verarbeitet werden. 
Bitte wählen Sie nur bereitgestellte Ressourcen oder nur nicht bereitgestellte Ressourcen aus und versuchen Sie es dann erneut.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Passwort verbergen", "tooltip-show": "Passwort anzeigen" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Zeile ausklappen", "collapse": "Zeile einklappen", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Rechte y-Achse verwenden" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Wird geladen" }, @@ -10186,7 +10173,7 @@ "title": "Datenquellen" }, "databases": { - "title": "Datenbanken" + "title": "" }, "datasources": { "subtitle": "Füge Datenquellen hinzu und konfiguriere sie", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Fertigstellen", "button-start": "Synchronisierung starten", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Dadurch wird die Repository-Konfiguration gelöscht und Sie verlieren alle Fortschritte. 
Sind Sie sicher, dass Sie Ihre Änderungen verwerfen möchten?", "confirm": "Ja, verwerfen", @@ -12090,8 +12079,8 @@ "title": "Bereiche auswählen" }, "tree": { - "collapse": "Einklappen", - "expand": "Ausklappen", + "collapse": "", + "expand": "", "headline": { "noResults": "Keine Ergebnisse für deine Abfrage gefunden", "recommended": "Empfohlen", diff --git a/public/locales/en-US/grafana.json b/public/locales/en-US/grafana.json index 93340dcbcc7..ed0738c373a 100644 --- a/public/locales/en-US/grafana.json +++ b/public/locales/en-US/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Access denied" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Some features are stable (GA) and enabled by default, whereas some are currently in their preliminary Beta phase, available for early adoption.", - "confirm-modal-body-2": "We advise understanding the implications of each feature change before making modifications.", - "confirmText-save-changes": "Save changes", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "General availability", - "deprecated": "Deprecated", - "ga": "GA" - }, - "save-changes": "Save changes", - "saving": "Saving...", - "title-apply-feature-toggle-changes": "Apply feature toggle changes" - }, "admin-orgs-table": { "aria-label-delete-org": "Delete org", "confirmText-delete": "Delete", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Sort dashboards by popularity in search", "title-team-sync": "Team Sync" }, - "feature-toggles": { - "restart-pending": "A restart is pending for your Grafana instance to apply the latest feature toggle changes", - "restart-required": "Saving feature toggle changes will prompt a restart of the instance, which may take a few minutes", - "sub-title": "View and edit feature toggles. Read more about feature toggles at <2>grafana.com." 
- }, "get-enterprise": { "contact-us": "Contact us and get a free trial", "description": "You can use the trial version for free for 30 days. We will remind you about it five days before the trial period ends.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Create at least one query or expression to be alerted on", "confirmText-deactivate": "Deactivate", "expressions": "Expressions", - "loading-data-sources": "Loading data sources...", "manipulate-returned-queries-other-operations": "Manipulate data returned from queries with math and other operations.", "message": { "a-valid-expression-is-required": "A valid expression is required" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Define allowed teams IDs", "display-name-description": "Will be displayed on the login page as \"Sign in with ...\". Helpful if you use more than one identity providers or SSO protocols.", "display-name-label": "Display name", - "domain-hint-description": "Parameter to indicate the realm of the user in the Azure AD/Entra ID tenant and streamline the login process.", + "domain-hint-description": "Parameter to indicate the realm of the user in the Entra ID tenant and streamline the login process.", "domain-hint-label": "Domain hint", "domain-hint-valid-domain": "This field must be a valid domain.", "email-attribute-name-description": "Name of the key to use for user email lookup within the attributes map of OAuth2 ID token.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Folder name", "move-modal-text": "This action will move the following content:", "move-modal-title": "Move", + "move-provisioned-folder": "Move provisioned folder", "moving": "Moving...", "new-folder-name-required-phrase": "Folder name is required.", "selected-mix-resources-modal-text": "You have selected both provisioned and non-provisioned resources. These cannot be processed together. 
Please select only provisioned resources or only non-provisioned resources and try again.", @@ -4161,6 +4142,13 @@ "body": "Try the new Advisor to uncover potential issues with your data sources and plugins.", "go-to-advisor": "Go to Advisor" }, + "cache-feature-highlight-page": { + "header": "Query caching can improve load times and reduce API costs by temporarily storing the results of data source queries. When you or other users submit the same query, the results will come back from the cache instead of from the data source.", + "item-1": "Faster dashboard load times, especially for popular dashboards.", + "item-2": "Reduced API costs.", + "item-3": "Reduced likelihood that APIs will rate-limit or throttle requests.", + "title": "Optimize queries with Query Caching in Grafana Cloud" + }, "cloud": { "connections-home-page": { "add-new-connection": { @@ -4209,6 +4197,20 @@ "unknown-datasource": "Unknown datasource" } }, + "feature-highlight-page": { + "foot-note": "After creating an account, you can easily <2>migrate this instance to Grafana Cloud with our Migration Assistant.", + "footer": "Create a Grafana Cloud Free account to start using data source permissions. This feature is also available with a Grafana Enterprise license.", + "footer-link": "Learn about Enterprise", + "link-button-label": "Create account" + }, + "insights-feature-highlight-page": { + "header": "Usage Insights provide detailed information about data source usage, like the number of views, queries, and errors users have experienced. 
You can use this to improve users’ experience and troubleshoot issues.", + "item-1": "Demonstrate and improve the value of your observability service by keeping track of user engagement", + "item-2": "Keep Grafana performant and by identifying and fixing slow, error-prone data sources", + "item-3": "Clean up your instance by finding and removing unused data sources", + "item-4": "Review individual data source usage insights at a glance in the UI, sort search results by usage and errors, or dig into detailed usage logs", + "title": "Understand usage of your data sources with Grafana Cloud" + }, "new-data-source-page": { "subTitle": { "choose-a-data-source-type": "Choose a data source type" @@ -4239,6 +4241,13 @@ "subtitle": "Manage your data source connections in one place. Use this page to add a new data source or manage your existing connections." } }, + "permissions-feature-highlight-page": { + "header": "With data source permissions, you can protect sensitive data by limiting access to this data source to specific users, teams, and roles.", + "item-1": "Protect sensitive data, like security logs, production databases, and personally-identifiable information", + "item-2": "Clean up users’ experience by hiding data sources they don’t need to use", + "item-3": "Share Grafana access more freely, knowing that users will not unwittingly see sensitive data", + "title": "Secure access to data with data source permissions in Grafana Cloud" + }, "search": { "aria-label-search-all": "Search all", "placeholder": "Search all" @@ -8716,6 +8725,9 @@ "tooltip-hide": "Hide password", "tooltip-show": "Show password" }, + "range-slider": { + "drag-handle-aria-label": "Use arrow keys to change the value" + }, "row-expander": { "aria-label-expand": "Expand row", "collapse": "Collapse row", @@ -8744,6 +8756,9 @@ "series-color-picker-popover": { "y-axis-usage": "Use right y-axis" }, + "slider": { + "drag-handle-aria-label": "Use arrow keys to change the value" + }, "spinner": { 
"aria-label": "Loading" }, @@ -10186,7 +10201,7 @@ "title": "Data sources" }, "databases": { - "title": "Databases" + "title": "Database" }, "datasources": { "subtitle": "Add and configure data sources", @@ -11372,10 +11387,10 @@ "all-resources-managed_one": "All {{count}} resource is managed", "all-resources-managed_other": "All {{count}} resources are managed", "no-results-matching-your-query": "No results matching your query", - "partial-managed": "{{managedCount}}/{{resourceCount}} resources managed.", + "partial-managed": "{{managedCount}}/{{resourceCount}} resources managed by Git sync.", "placeholder-search": "Search", - "unmanaged-resources_one": "{{count}} resource isn't managed as code yet.", - "unmanaged-resources_other": "{{count}} resources aren't managed as code yet." + "unmanaged-resources_one": "{{count}} resource isn't managed by git sync.", + "unmanaged-resources_other": "{{count}} resources aren't managed by git sync." }, "get-default-values": { "title": { @@ -11712,6 +11727,8 @@ "button-cancelling": "Cancelling...", "button-next": "Finish", "button-start": "Begin synchronization", + "check-status-button": "Check repository status", + "check-status-message": "Repository connecting, synchronize will be ready soon.", "discard-modal": { "body": "This will delete the repository configuration and you will lose all progress. 
Are you sure you want to discard your changes?", "confirm": "Yes, discard", @@ -12090,8 +12107,8 @@ "title": "Select scopes" }, "tree": { - "collapse": "Collapse", - "expand": "Expand", + "collapse": "Collapse {{title}}", + "expand": "Expand {{title}}", "headline": { "noResults": "No results found for your query", "recommended": "Recommended", diff --git a/public/locales/es-ES/grafana.json b/public/locales/es-ES/grafana.json index cb343fb25c8..4c49f7e259e 100644 --- a/public/locales/es-ES/grafana.json +++ b/public/locales/es-ES/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Acceso denegado" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Algunas características son estables (GA) y están habilitadas de forma predeterminada, mientras que otras se encuentran actualmente en su fase beta preliminar, disponibles para su adopción temprana.", - "confirm-modal-body-2": "Aconsejamos comprender las implicaciones de cada cambio de característica antes de realizar modificaciones.", - "confirmText-save-changes": "Guardar cambios", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Disponibilidad general", - "deprecated": "Obsoleto", - "ga": "GA" - }, - "save-changes": "Guardar cambios", - "saving": "Guardando…", - "title-apply-feature-toggle-changes": "Aplicar cambios de conmutación de características" - }, "admin-orgs-table": { "aria-label-delete-org": "Eliminar organización", "confirmText-delete": "Eliminar", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Ordenar los dashboards por popularidad en la búsqueda", "title-team-sync": "Sincronización de equipos" }, - "feature-toggles": { - "restart-pending": "Hay un reinicio pendiente para que la instancia de Grafana aplique los últimos cambios de conmutación de características", - "restart-required": "Guardar los cambios de alternancia de funciones provocará un reinicio de la instancia, que puede tardar unos minutos", - "sub-title": "Consulta y edita los 
conmutadores de características. Conoce más información sobre los conmutadores de características en <2>grafana.com." - }, "get-enterprise": { "contact-us": "Ponte en contacto con nosotros y solicita una prueba gratuita", "description": "Puedes utilizar la versión de prueba de forma gratuita durante 30 días. Cinco días antes de que finalice el periodo de prueba te lo recordaremos.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Crea al menos una consulta o expresión sobre la que alertar", "confirmText-deactivate": "Desactivar", "expressions": "Expresiones", - "loading-data-sources": "Cargando fuentes de datos...", "manipulate-returned-queries-other-operations": "Manipula los datos devueltos por las consultas con operaciones matemáticas y de otro tipo.", "message": { "a-valid-expression-is-required": "Se requiere una expresión válida" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Definir ID de equipos permitidos", "display-name-description": "Se mostrará en la página de inicio de sesión como «Iniciar sesión con...». 
Resulta útil si utiliza más de un proveedor de identidad o protocolos SSO.", "display-name-label": "Nombre para mostrar", - "domain-hint-description": "Parámetro para indicar el ámbito del usuario en el inquilino de Azure AD/Entra ID y agilizar el proceso de inicio de sesión.", + "domain-hint-description": "", "domain-hint-label": "Sugerencia de dominio", "domain-hint-valid-domain": "Este campo debe ser un dominio válido.", "email-attribute-name-description": "Nombre de la clave que se utilizará para la búsqueda de correo electrónico del usuario dentro del mapa de atributos del token de identificación OAuth2.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Nombre de la carpeta", "move-modal-text": "Esta acción moverá el siguiente contenido:", "move-modal-title": "Mover", + "move-provisioned-folder": "", "moving": "Moviendo...", "new-folder-name-required-phrase": "Se requiere el nombre de la carpeta.", "selected-mix-resources-modal-text": "Has seleccionado recursos aprovisionados y no aprovisionados. No se pueden procesar juntos. 
Selecciona solo recursos aprovisionados o solo recursos no aprovisionados e inténtalo de nuevo.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Ocultar contraseña", "tooltip-show": "Mostrar contraseña" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Expandir fila", "collapse": "Contraer fila", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Usar eje y derecho" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Cargando" }, @@ -10186,7 +10173,7 @@ "title": "Orígenes de datos" }, "databases": { - "title": "Bases de datos" + "title": "" }, "datasources": { "subtitle": "Añadir y configurar orígenes de datos", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Terminar", "button-start": "Iniciar la sincronización", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Esto eliminará la configuración del repositorio y se perderá todo el progreso. ¿Seguro que quieres descartar los cambios?", "confirm": "Sí, descartar", @@ -12090,8 +12079,8 @@ "title": "Seleccionar ámbitos" }, "tree": { - "collapse": "Contraer", - "expand": "Expandir", + "collapse": "", + "expand": "", "headline": { "noResults": "No se han encontrado resultados para tu consulta", "recommended": "Recomendado", diff --git a/public/locales/fr-FR/grafana.json b/public/locales/fr-FR/grafana.json index 680870bb298..b88e35f8c48 100644 --- a/public/locales/fr-FR/grafana.json +++ b/public/locales/fr-FR/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Accès refusé" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Certaines fonctionnalités sont stables (GA) et activées par défaut, tandis que d’autres sont actuellement dans leur phase bêta préliminaire, disponibles pour une adoption précoce.", - "confirm-modal-body-2": "Nous vous conseillons de comprendre les implications de chaque changement de fonctionnalité avant d’apporter des modifications.", - 
"confirmText-save-changes": "Enregistrer les modifications", - "get-stage-cell": { - "beta": "Bêta", - "content-general-availability": "Disponibilité générale", - "deprecated": "Obsolète", - "ga": "GA" - }, - "save-changes": "Enregistrer les modifications", - "saving": "Enregistrement en cours...", - "title-apply-feature-toggle-changes": "Appliquer les modifications de basculement de fonctionnalité" - }, "admin-orgs-table": { "aria-label-delete-org": "Supprimer l’organisation", "confirmText-delete": "Supprimer", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Trier les tableaux de bord par popularité dans la recherche", "title-team-sync": "Synchronisation d’équipe" }, - "feature-toggles": { - "restart-pending": "Un redémarrage est en attente pour que votre instance Grafana applique les dernières modifications de basculement de fonctionnalité", - "restart-required": "L’enregistrement des modifications de basculement de fonctionnalité entraînera un redémarrage de l’instance, ce qui peut prendre quelques minutes", - "sub-title": "Afficher et modifier les commutateurs de fonctionnalités. Pour en savoir plus sur les commutateurs de fonctionnalités, consultez <2>grafana.com." - }, "get-enterprise": { "contact-us": "Contactez-nous et profitez d'un essai gratuit", "description": "Vous pouvez utiliser la version d'essai gratuitement pendant 30 jours. 
Cinq jours avant la fin de la période d'essai, nous vous enverrons un rappel à ce sujet.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Créer au moins une requête ou une expression pour laquelle une alerte doit être émise", "confirmText-deactivate": "Désactiver", "expressions": "Expressions", - "loading-data-sources": "Chargement des sources de données…", "manipulate-returned-queries-other-operations": "Manipulez les données renvoyées par les requêtes avec des opérations mathématiques et autres.", "message": { "a-valid-expression-is-required": "Une expression valide est obligatoire" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Définir les ID des équipes autorisées", "display-name-description": "Sera affiché sur la page de connexion comme « Se connecter avec... ». Utile si vous utilisez plusieurs fournisseurs d’identité ou protocoles SSO.", "display-name-label": "Pseudo", - "domain-hint-description": "Paramètre permettant d’indiquer le domaine de l’utilisateur dans le locataire Azure AD/Entra ID pour simplifier le processus de connexion.", + "domain-hint-description": "", "domain-hint-label": "Indice de domaine", "domain-hint-valid-domain": "Ce champ doit contenir un domaine valide.", "email-attribute-name-description": "Nom de la clé à utiliser pour la recherche d’e-mail de l’utilisateur dans la carte d’attributs du jeton d’identification OAuth2.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Nom du dossier", "move-modal-text": "Cette action déplacera le contenu suivant :", "move-modal-title": "Déplacer", + "move-provisioned-folder": "", "moving": "Déplacement...", "new-folder-name-required-phrase": "Vous devez saisir le nom du dossier.", "selected-mix-resources-modal-text": "Vous avez sélectionné à la fois des ressources provisionnées et non provisionnées. Ces deux types ne peuvent être traités simultanément. 
Veuillez sélectionner uniquement des ressources provisionnées ou uniquement des ressources non provisionnées, puis réessayer.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Masquer le mot de passe", "tooltip-show": "Afficher le mot de passe" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Développer la ligne", "collapse": "Réduire la ligne", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Utiliser l'axe y droit" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Chargement en cours" }, @@ -10186,7 +10173,7 @@ "title": "Sources de données" }, "databases": { - "title": "Bases de données" + "title": "" }, "datasources": { "subtitle": "Ajouter et configurer des sources de données", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Terminer", "button-start": "Commencer la synchronisation", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Cette action supprimera la configuration du dépôt et vous perdrez toutes vos modifications. 
Voulez-vous vraiment annuler vos modifications ?", "confirm": "Oui, annuler", @@ -12090,8 +12079,8 @@ "title": "Sélectionner les portées" }, "tree": { - "collapse": "Réduire", - "expand": "Développer", + "collapse": "", + "expand": "", "headline": { "noResults": "Aucun résultat n'a été trouvé pour votre requête", "recommended": "Recommandé", diff --git a/public/locales/hu-HU/grafana.json b/public/locales/hu-HU/grafana.json index 6fa46501085..82ca4a86f87 100644 --- a/public/locales/hu-HU/grafana.json +++ b/public/locales/hu-HU/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Hozzáférés megtagadva" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Egyes funkciók stabilak (GA), és alapértelmezés szerint engedélyezve vannak, míg mások jelenleg előzetes béta fázisban vannak, és korai bevezetéshez állnak rendelkezésre.", - "confirm-modal-body-2": "Javasoljuk, hogy a módosítások végrehajtása előtt ismerje meg az egyes funkciók módosításának következményeit.", - "confirmText-save-changes": "Beállítások mentése", - "get-stage-cell": { - "beta": "Béta", - "content-general-availability": "Általános elérhetőség", - "deprecated": "Elavult", - "ga": "GA" - }, - "save-changes": "Beállítások mentése", - "saving": "Mentés...", - "title-apply-feature-toggle-changes": "Funkcióváltás módosításainak alkalmazása" - }, "admin-orgs-table": { "aria-label-delete-org": "Szervezet törlése", "confirmText-delete": "Törlés", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Irányítópultok rendezése népszerűség szerint a keresésben", "title-team-sync": "Csapat szinkronizálása" }, - "feature-toggles": { - "restart-pending": "A Grafana-példány újraindítása függőben van a legújabb funkcióváltási módosítások alkalmazásához", - "restart-required": "A funkcióváltási módosítások mentése a példány újraindításával jár, ami néhány percet vehet igénybe", - "sub-title": "Funkcióváltók megtekintése és szerkesztése. 
A funkciók közötti váltásról bővebben a <2>grafana.com oldalon olvashat." - }, "get-enterprise": { "contact-us": "Vegye fel velünk a kapcsolatot, és kérjen ingyenes próbaverziót", "description": "A próbaverziót 30 napig ingyenesen használhatja. Öt nappal a próbaidőszak vége előtt emlékeztetni fogjuk erre.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Hozzon létre legalább egy lekérdezést vagy kifejezést, amelyre riasztást kap", "confirmText-deactivate": "Deaktiválás", "expressions": "Kifejezések", - "loading-data-sources": "Adatforrások betöltése…", "manipulate-returned-queries-other-operations": "A lekérdezésekből visszaadott adatok manipulálása matematikai és egyéb műveletekkel.", "message": { "a-valid-expression-is-required": "Érvényes kifejezés szükséges" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Engedélyezett csapatazonosítók meghatározása", "display-name-description": "A bejelentkezési oldalon „Bejelentkezés ezzel” felirattal jelenik meg. Hasznos, ha egynél több identitásszolgáltatót vagy SSO-protokollt használ.", "display-name-label": "Megjelenített név", - "domain-hint-description": "Paraméter, amely jelzi a felhasználó tartományát az Azure AD/Entra ID bérlőben, és egyszerűsíti a bejelentkezést.", + "domain-hint-description": "", "domain-hint-label": "Tipp a domainhez", "domain-hint-valid-domain": "Ebben a mezőben csak érvényes domain szerepelhet.", "email-attribute-name-description": "Az OAuth2-azonosítótoken attribútumtérképén belül a felhasználói e-mail-kereséshez használandó kulcs neve.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Mappa neve", "move-modal-text": "Ez a művelet áthelyezi a következő tartalmat:", "move-modal-title": "Áthelyezés", + "move-provisioned-folder": "", "moving": "Áthelyezés...", "new-folder-name-required-phrase": "A mappa nevének megadása kötelező.", "selected-mix-resources-modal-text": "Kiépített és nem kiépített erőforrásokat is kijelölt. Ezeket nem lehet egyszerre feldolgozni. 
Kérjük, válasszon csak kiépített vagy csak nem kiépített erőforrásokat, majd próbálja újra.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Jelszó elrejtése", "tooltip-show": "Jelszó megjelenítése" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Sor kibontása", "collapse": "Sor összecsukása", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Jobb y tengely használata" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Betöltés" }, @@ -10186,7 +10173,7 @@ "title": "Adatforrások" }, "databases": { - "title": "Adatbázisok" + "title": "" }, "datasources": { "subtitle": "Adatforrások hozzáadása és konfigurálása", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Befejezés", "button-start": "Szinkronizálás indítása", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Ez törli az adattár konfigurációját, és az összes előrehaladás el fog veszni. Biztosan elveti a módosításokat?", "confirm": "Igen, elvetés", @@ -12090,8 +12079,8 @@ "title": "Hatókörök kijelölése" }, "tree": { - "collapse": "Összecsukás", - "expand": "Kibontás", + "collapse": "", + "expand": "", "headline": { "noResults": "Nincs találat a lekérdezésre", "recommended": "Ajánlott", diff --git a/public/locales/id-ID/grafana.json b/public/locales/id-ID/grafana.json index fb1006a56ac..004a4b57582 100644 --- a/public/locales/id-ID/grafana.json +++ b/public/locales/id-ID/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Akses ditolak" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Beberapa fitur stabil (GA) dan diaktifkan secara default, sedangkan beberapa fitur lainnya saat ini dalam fase Beta awal, tersedia untuk penggunaan awal.", - "confirm-modal-body-2": "Sebaiknya pahami implikasi dari setiap perubahan fitur sebelum melakukan modifikasi.", - "confirmText-save-changes": "Simpan perubahan", - "get-stage-cell": { - "beta": "Beta", - 
"content-general-availability": "Ketersediaan umum", - "deprecated": "Usang", - "ga": "GA" - }, - "save-changes": "Simpan perubahan", - "saving": "Menyimpan...", - "title-apply-feature-toggle-changes": "Terapkan perubahan tombol fitur" - }, "admin-orgs-table": { "aria-label-delete-org": "Hapus org", "confirmText-delete": "Hapus", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Urutkan dasbor berdasarkan popularitas dalam pencarian", "title-team-sync": "Sinkronisasi Tim" }, - "feature-toggles": { - "restart-pending": "Restart menunggu instans Grafana Anda menerapkan perubahan tombol alih fitur terbaru", - "restart-required": "Menyimpan perubahan tombol fitur akan meminta mulai ulang instans, yang mungkin memerlukan beberapa menit", - "sub-title": "Lihat dan edit tombol fitur. Baca selengkapnya tombol fitur di <2>grafana.com." - }, "get-enterprise": { "contact-us": "Hubungi kami dan dapatkan uji coba gratis", "description": "Anda dapat menggunakan versi uji coba secara gratis selama 30 hari. Kami akan mengingatkan Anda tentang hal itu lima hari sebelum periode uji coba berakhir.", @@ -2173,7 +2154,6 @@ "body-queries-expressions-configured": "Buat setidaknya satu kueri atau pola untuk diperingatkan", "confirmText-deactivate": "Nonaktifkan", "expressions": "Pola", - "loading-data-sources": "Memuat sumber data...", "manipulate-returned-queries-other-operations": "Manipulasi data yang dikembalikan dari kueri dengan matematika dan operasi lainnya.", "message": { "a-valid-expression-is-required": "Persamaan yang valid diperlukan" @@ -3248,7 +3228,7 @@ "define-allowed-teams-ids-label": "Tentukan ID tim yang diizinkan", "display-name-description": "Akan ditampilkan di halaman login sebagai \"Masuk dengan ...\". 
Berguna jika Anda menggunakan lebih dari satu penyedia identitas atau protokol SSO.", "display-name-label": "Nama tampilan", - "domain-hint-description": "Parameter untuk menunjukkan domain pengguna di penyewa Azure AD/Entra ID dan menyederhanakan proses masuk.", + "domain-hint-description": "", "domain-hint-label": "Petunjuk domain", "domain-hint-valid-domain": "Bidang ini harus berupa domain yang valid.", "email-attribute-name-description": "Nama kunci yang akan digunakan untuk pencarian email pengguna dalam peta atribut token ID OAuth2.", @@ -3504,6 +3484,7 @@ "move-modal-field-label": "Nama folder", "move-modal-text": "Tindakan ini akan memindahkan konten berikut:", "move-modal-title": "Pindahkan", + "move-provisioned-folder": "", "moving": "Memindahkan...", "new-folder-name-required-phrase": "Nama folder wajib diisi.", "selected-mix-resources-modal-text": "Anda telah memilih sumber daya yang disediakan dan tidak disediakan. Keduanya tidak dapat diproses sekaligus. Hanya pilih sumber daya yang disediakan atau hanya sumber daya yang tidak disediakan dan coba lagi.", @@ -8693,6 +8674,9 @@ "tooltip-hide": "Sembunyikan kata sandi", "tooltip-show": "Tampilkan kata sandi" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Perbesar baris", "collapse": "Ciutkan baris", @@ -8721,6 +8705,9 @@ "series-color-picker-popover": { "y-axis-usage": "Gunakan sumbu y kanan" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Memuat" }, @@ -10155,7 +10142,7 @@ "title": "Sumber data" }, "databases": { - "title": "Database" + "title": "" }, "datasources": { "subtitle": "Tambahkan dan konfigurasikan sumber data", @@ -11672,6 +11659,8 @@ "button-cancelling": "", "button-next": "Selesai", "button-start": "Mulai sinkronisasi", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Ini akan menghapus konfigurasi repositori dan Anda akan kehilangan semua kemajuan. 
Yakin ingin membuang perubahan Anda?", "confirm": "Ya, buang", @@ -12047,8 +12036,8 @@ "title": "Pilih cakupan" }, "tree": { - "collapse": "Ciutkan", - "expand": "Perluas", + "collapse": "", + "expand": "", "headline": { "noResults": "Hasil untuk kueri Anda tidak ditemukan", "recommended": "Disarankan", diff --git a/public/locales/it-IT/grafana.json b/public/locales/it-IT/grafana.json index 4742503404c..167081837d0 100644 --- a/public/locales/it-IT/grafana.json +++ b/public/locales/it-IT/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Accesso negato" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Alcune funzionalità sono stabili (GA) e attive per impostazione predefinita, mentre altre sono ancora in fase beta preliminare e disponibili su richiesta in anteprima.", - "confirm-modal-body-2": "Si consiglia di comprendere le implicazioni di ogni modifica delle funzionalità prima di applicarle.", - "confirmText-save-changes": "Salva modifiche", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Disponibilità generale", - "deprecated": "Obsoleto", - "ga": "GA" - }, - "save-changes": "Salva modifiche", - "saving": "Salvataggio in corso...", - "title-apply-feature-toggle-changes": "Applica le modifiche al toggle delle funzionalità" - }, "admin-orgs-table": { "aria-label-delete-org": "Elimina org", "confirmText-delete": "Elimina", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Ordina le dashboard in base alla popolarità nella ricerca", "title-team-sync": "Sincronizzazione del team" }, - "feature-toggles": { - "restart-pending": "È in sospeso un riavvio per l'istanza Grafana per applicare le ultime modifiche al toggle delle funzionalità", - "restart-required": "Il salvataggio delle modifiche al toggle delle funzionalità richiederà il riavvio dell'istanza, che potrebbe richiedere alcuni minuti", - "sub-title": "Visualizza e modifica gli interruttori della funzionalità. 
Scopri di più sugli interruttori della funzionalità su <2>grafana.com." - }, "get-enterprise": { "contact-us": "Contattaci e ricevi una prova gratuita", "description": "Puoi utilizzare la versione di prova gratuitamente per 30 giorni. Te lo ricorderemo cinque giorni prima della fine del periodo di prova.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Crea almeno una query o un'espressione su cui ricevere un avviso", "confirmText-deactivate": "Disattiva", "expressions": "Espressioni", - "loading-data-sources": "Caricamento delle origini dei dati in corso...", "manipulate-returned-queries-other-operations": "Modifica i dati che trovi nelle query usando operazioni matematiche e di altro tipo.", "message": { "a-valid-expression-is-required": "È richiesta un'espressione valida" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Definisci gli ID dei team consentiti", "display-name-description": "Verrà visualizzato nella pagina di accesso come \"Accedi con...\". Utile se utilizzi più di un provider di identità o protocolli SSO.", "display-name-label": "Visualizza nome", - "domain-hint-description": "Parametro per indicare il dominio dell'utente nel tenant Azure AD/Entra ID e semplificare il processo di accesso.", + "domain-hint-description": "", "domain-hint-label": "Suggerimento dominio", "domain-hint-valid-domain": "Questo campo deve essere un dominio valido.", "email-attribute-name-description": "Nome della chiave da utilizzare per la ricerca dell'e-mail dell'utente all'interno della mappa degli attributi del token ID di OAuth2.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Nome cartella", "move-modal-text": "Questa azione sposterà il seguente contenuto:", "move-modal-title": "Sposta", + "move-provisioned-folder": "", "moving": "Spostamento in corso...", "new-folder-name-required-phrase": "Il nome della cartella è obbligatorio.", "selected-mix-resources-modal-text": "Hai selezionato sia risorse sottoposte a provisioning che risorse non 
sottoposte a provisioning. Non possono essere elaborate insieme. Seleziona solo risorse sottoposte a provisioning o solo risorse non sottoposte a provisioning e riprova.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Nascondi password", "tooltip-show": "Mostra password" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Espandi riga", "collapse": "Riduci riga", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Usa l'asse y destro" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Caricamento" }, @@ -10186,7 +10173,7 @@ "title": "Fonti dei dati" }, "databases": { - "title": "Database" + "title": "" }, "datasources": { "subtitle": "Aggiungi e configura origini dati", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Fine", "button-start": "Inizia la sincronizzazione", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Questa operazione eliminerà la configurazione del repository e perderai tutti i progressi. 
Vuoi davvero eliminare le modifiche?", "confirm": "Sì, elimina", @@ -12090,8 +12079,8 @@ "title": "Seleziona gli ambiti" }, "tree": { - "collapse": "Riduci", - "expand": "Espandi", + "collapse": "", + "expand": "", "headline": { "noResults": "Nessun risultato trovato per la ricerca", "recommended": "Consigliato", diff --git a/public/locales/ja-JP/grafana.json b/public/locales/ja-JP/grafana.json index a6259267cb2..b5a187415c6 100644 --- a/public/locales/ja-JP/grafana.json +++ b/public/locales/ja-JP/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "アクセス拒否" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "一部機能は安定版(GA)として標準で有効になっていますが、いくつかの機能は現在暫定的なベータ版の段階であり、早期導入することが可能です。", - "confirm-modal-body-2": "変更する前に、各機能の変更がもたらす影響を理解することをお勧めします。", - "confirmText-save-changes": "変更を保存", - "get-stage-cell": { - "beta": "ベータ版", - "content-general-availability": "一般提供", - "deprecated": "非推奨", - "ga": "GA" - }, - "save-changes": "変更を保存", - "saving": "保存中…", - "title-apply-feature-toggle-changes": "機能の切り替え変更を適用する" - }, "admin-orgs-table": { "aria-label-delete-org": "組織を削除", "confirmText-delete": "削除", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "検索時に人気度順にダッシュボードを並べ替え", "title-team-sync": "チーム同期" }, - "feature-toggles": { - "restart-pending": "最新の機能切り替えの変更を適用するために、Grafanaインスタンスの再起動は保留中です", - "restart-required": "機能切り替えの変更を保存すると、インスタンスの再起動が必要となります。これには数分かかる場合があります", - "sub-title": "機能トグルを表示および編集します。機能トグルの詳細については、<2>grafana.comをご覧ください。" - }, "get-enterprise": { "contact-us": "お問い合わせいただくと、無料トライアルをご利用いただけます", "description": "トライアル版は30日間無料でご利用いただけます。試用期間が終了する5日前に、その旨をお知らせします。", @@ -2173,7 +2154,6 @@ "body-queries-expressions-configured": "アラート対象となるクエリまたは式を少なくとも1つ作成してください", "confirmText-deactivate": "無効化する", "expressions": "式", - "loading-data-sources": "データソース読み込み中...", "manipulate-returned-queries-other-operations": "数式やその他の演算を使用してクエリから返されたデータを操作します。", "message": { "a-valid-expression-is-required": "有効な式の入力が必要です" @@ -3248,7 +3228,7 @@ 
"define-allowed-teams-ids-label": "許可されたチームIDを定義する", "display-name-description": "ログインページに「...でサインイン」と表示されます。複数のIDプロバイダーまたはSSOプロトコルを使用する場合に役立ちます。", "display-name-label": "表示名", - "domain-hint-description": "Azure AD/Entra IDテナントでユーザーの領域を示し、ログインプロセスを合理化するためのパラメーター。", + "domain-hint-description": "", "domain-hint-label": "ドメインヒント", "domain-hint-valid-domain": "このフィールドには有効なドメインを入力する必要があります。", "email-attribute-name-description": "OAuth2 IDトークンの属性マップ内でユーザーメール検索に使用するキーの名前。", @@ -3504,6 +3484,7 @@ "move-modal-field-label": "フォルダ名", "move-modal-text": "この操作により、次のコンテンツが移動されます。", "move-modal-title": "移動", + "move-provisioned-folder": "", "moving": "移動中…", "new-folder-name-required-phrase": "フォルダ名が必要です。", "selected-mix-resources-modal-text": "プロビジョニング済みのリソースと未プロビジョニングのリソースの両方が選択されています。これらは同時に処理できません。プロビジョニングされたリソースのみまたはプロビジョニングされていないリソースのみを選択して、再試行してください。", @@ -8693,6 +8674,9 @@ "tooltip-hide": "パスワードを非表示", "tooltip-show": "パスワードを表示" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "行を展開する", "collapse": "行を折りたたむ", @@ -8721,6 +8705,9 @@ "series-color-picker-popover": { "y-axis-usage": "右y軸を使用" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "読込中" }, @@ -10155,7 +10142,7 @@ "title": "データソース" }, "databases": { - "title": "データベース" + "title": "" }, "datasources": { "subtitle": "データソースを追加および構成する", @@ -11672,6 +11659,8 @@ "button-cancelling": "", "button-next": "完了", "button-start": "同期を開始", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "この操作によりリポジトリの設定が削除され、すべての変更内容が失われます。本当に破棄してもよろしいですか?", "confirm": "はい、破棄します", @@ -12047,8 +12036,8 @@ "title": "スコープを選択" }, "tree": { - "collapse": "折りたたみ表示", - "expand": "展開", + "collapse": "", + "expand": "", "headline": { "noResults": "クエリに一致する結果が見つかりませんでした。", "recommended": "おすすめ", diff --git a/public/locales/ko-KR/grafana.json b/public/locales/ko-KR/grafana.json index e045a4b1c1d..f50b3cf61bf 100644 --- 
a/public/locales/ko-KR/grafana.json +++ b/public/locales/ko-KR/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "액세스 거부됨" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "일부 기능은 안정적이며(GA 단계) 기본적으로 활성화되어 있지만, 일부 기능은 현재 예비 베타 단계에 있으며 조기 도입이 가능하도록 제공하고 있습니다.", - "confirm-modal-body-2": "변경 사항을 적용하기 전에 각 기능의 변경이 어떤 영향을 미칠지 파악하는 것이 좋습니다.", - "confirmText-save-changes": "변경 사항 저장", - "get-stage-cell": { - "beta": "베타", - "content-general-availability": "정식 제공", - "deprecated": "사용 중단됨", - "ga": "GA 단계" - }, - "save-changes": "변경 사항 저장", - "saving": "저장 중...", - "title-apply-feature-toggle-changes": "기능 토글 변경 사항 적용" - }, "admin-orgs-table": { "aria-label-delete-org": "조직 삭제", "confirmText-delete": "삭제", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "검색 시 인기도에 따라 대시보드 정렬", "title-team-sync": "팀 동기화" }, - "feature-toggles": { - "restart-pending": "최신 기능 토글 변경 사항을 적용하기 위해 Grafana 인스턴스의 재시작이 보류 중입니다", - "restart-required": "기능 토글 변경 사항을 저장하면 인스턴스가 다시 시작되며, 몇 분이 소요될 수 있습니다", - "sub-title": "기능 토글을 보고 편집합니다. <2>grafana.com에서 기능 토글에 대해 자세히 알아보세요." - }, "get-enterprise": { "contact-us": "무료 체험 문의하기", "description": "30일간 무료로 체험판을 사용하실 수 있습니다. 체험 기간이 종료되기 5일 전에 알려드립니다.", @@ -2173,7 +2154,6 @@ "body-queries-expressions-configured": "경고할 쿼리 또는 표현식을 하나 이상 생성합니다", "confirmText-deactivate": "비활성화", "expressions": "표현식", - "loading-data-sources": "데이터 소스 로딩 중...", "manipulate-returned-queries-other-operations": "쿼리에서 반환된 데이터를 수학 및 기타 연산으로 조작합니다.", "message": { "a-valid-expression-is-required": "유효한 표현식이 필요합니다." @@ -3248,7 +3228,7 @@ "define-allowed-teams-ids-label": "허용된 팀 ID 정의", "display-name-description": "로그인 페이지에 '다음으로 로그인'으로 표시됩니다. 
두 개 이상의 ID 제공자 또는 SSO 프로토콜을 사용하는 경우 유용합니다.", "display-name-label": "표시 이름", - "domain-hint-description": "Azure AD/Entra ID 테넌트에서 사용자의 영역을 나타내고 로그인 프로세스를 간소화하는 매개변수입니다.", + "domain-hint-description": "", "domain-hint-label": "도메인 힌트", "domain-hint-valid-domain": "이 필드는 유효한 도메인이어야 합니다.", "email-attribute-name-description": "OAuth2 ID 토큰의 속성 맵 내에서 사용자 이메일 조회에 사용할 키의 이름입니다.", @@ -3504,6 +3484,7 @@ "move-modal-field-label": "폴더 이름", "move-modal-text": "이 작업을 수행하면 다음 콘텐츠가 이동됩니다.", "move-modal-title": "이동", + "move-provisioned-folder": "", "moving": "이동 중...", "new-folder-name-required-phrase": "폴더 이름은 필수 입력 항목입니다.", "selected-mix-resources-modal-text": "프로비저닝된 리소스와 프로비저닝되지 않은 리소스를 모두 선택하셨습니다. 이 두 가지는 함께 처리할 수 없습니다. 프로비저닝된 리소스만 또는 프로비저닝되지 않은 리소스만 선택한 후 다시 시도해 주세요.", @@ -8693,6 +8674,9 @@ "tooltip-hide": "비밀번호 숨기기", "tooltip-show": "비밀번호" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "행 펼치기", "collapse": "행 접기", @@ -8721,6 +8705,9 @@ "series-color-picker-popover": { "y-axis-usage": "오른쪽 y축 사용" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "로딩 중" }, @@ -10155,7 +10142,7 @@ "title": "데이터 소스" }, "databases": { - "title": "데이터베이스" + "title": "" }, "datasources": { "subtitle": "데이터 소스 추가 및 구성", @@ -11672,6 +11659,8 @@ "button-cancelling": "", "button-next": "완료", "button-start": "동기화 시작", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "이렇게 하면 리포지토리 구성이 삭제되고 지금까지의 작업 내용이 모두 사라집니다. 
정말 변경 사항을 취소하시겠어요?", "confirm": "네, 취소해 주세요", @@ -12047,8 +12036,8 @@ "title": "범위 선택" }, "tree": { - "collapse": "접기", - "expand": "펼치기", + "collapse": "", + "expand": "", "headline": { "noResults": "쿼리에 대해 찾은 결과 없음", "recommended": "권장", diff --git a/public/locales/nl-NL/grafana.json b/public/locales/nl-NL/grafana.json index d40c85a15fd..e88d4cc4ed9 100644 --- a/public/locales/nl-NL/grafana.json +++ b/public/locales/nl-NL/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Toegang geweigerd" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Sommige functies zijn stabiel (GA) en standaard ingeschakeld, terwijl sommige zich momenteel in de voorlopige bètafase bevinden en beschikbaar zijn voor vroege adoptie.", - "confirm-modal-body-2": "We raden aan om de implicaties van elke functiewijziging te begrijpen voordat je wijzigingen aanbrengt.", - "confirmText-save-changes": "Wijzigingen opslaan", - "get-stage-cell": { - "beta": "Bèta", - "content-general-availability": "Algemene beschikbaarheid", - "deprecated": "Verouderd", - "ga": "GA" - }, - "save-changes": "Wijzigingen opslaan", - "saving": "Aan het opslaan ...", - "title-apply-feature-toggle-changes": "Wijzigingen voor functieschakelaars toepassen" - }, "admin-orgs-table": { "aria-label-delete-org": "Organisatie verwijderen", "confirmText-delete": "Verwijderen", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Dashboards sorteren op populariteit in zoekopdrachten", "title-team-sync": "Team synchroniseren" }, - "feature-toggles": { - "restart-pending": "Er is een herstart in afwachting van je Grafana-instantie om de nieuwste wijzigingen in de functieschakelaar toe te passen", - "restart-required": "Als je wijzigingen in de functieschakelaar opslaat, wordt de instantie opnieuw gestart, wat een paar minuten kan duren", - "sub-title": "Bekijk en bewerk functieschakelaars. Lees meer over functieschakelaars op <2>grafana.com ." 
- }, "get-enterprise": { "contact-us": "Neem contact met ons op en ontvang een gratis proefperiode", "description": "Je kunt de proefversie 30 dagen gratis gebruiken. Vijf dagen voordat de proefperiode afloopt ontvang je van ons een herinnering.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Maak ten minste één query of expressie om te worden gewaarschuwd", "confirmText-deactivate": "Deactiveren", "expressions": "Expressie", - "loading-data-sources": "Gegevensbronnen laden...", "manipulate-returned-queries-other-operations": "Manipuleer gegevens die worden geretourneerd uit query's met wiskundige en andere bewerkingen.", "message": { "a-valid-expression-is-required": "Een geldige expressie is vereist" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Toegestane team-id's definiëren", "display-name-description": "Wordt weergegeven op de inlogpagina als 'Aanmelden met ...'. Handig als je meer dan één identiteitsprovider of SSO-protocol gebruikt.", "display-name-label": "Weergavenaam", - "domain-hint-description": "Parameter om het domein van de gebruiker in de Azure AD/Entra ID-tenant aan te geven en het inlogproces te stroomlijnen.", + "domain-hint-description": "", "domain-hint-label": "Domeinhint", "domain-hint-valid-domain": "Dit veld moet een geldig domein zijn.", "email-attribute-name-description": "Naam van de sleutel die moet worden gebruikt voor het opzoeken van gebruikers-e-mails in de attribuutkaart van het OAuth2 ID-token.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Mapnaam", "move-modal-text": "Met deze actie wordt de volgende inhoud verplaatst:", "move-modal-title": "Verplaatsen", + "move-provisioned-folder": "", "moving": "Bezig met verplaatsen ...", "new-folder-name-required-phrase": "Mapnaam is vereist.", "selected-mix-resources-modal-text": "Je hebt zowel toegewezen als niet-toegewezen bronnen geselecteerd. Deze kunnen niet samen worden verwerkt. 
Selecteer alleen toegewezen bronnen of alleen niet-toegewezen bronnen en probeer het opnieuw.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Wachtwoord verbergen", "tooltip-show": "Wachtwoord weergeven" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Rij uitvouwen", "collapse": "Rij samenvouwen", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Rechter y-as gebruiken" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Bezig met laden" }, @@ -10186,7 +10173,7 @@ "title": "Gegevensbronnen" }, "databases": { - "title": "Databases" + "title": "" }, "datasources": { "subtitle": "Gegevensbronnen toevoegen en configureren", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Voltooien", "button-start": "Synchronisatie starten", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Hiermee wordt de repository-configuratie verwijderd en verlies je alle voortgang. 
Weet je zeker dat je je wijzigingen wilt negeren?", "confirm": "Ja, verwijderen", @@ -12090,8 +12079,8 @@ "title": "Scopes selecteren" }, "tree": { - "collapse": "Samenvouwen", - "expand": "Uitvouwen", + "collapse": "", + "expand": "", "headline": { "noResults": "Geen resultaten gevonden voor je zoekopdracht", "recommended": "Aanbevolen", diff --git a/public/locales/pl-PL/grafana.json b/public/locales/pl-PL/grafana.json index a32da52687d..6e98e44485a 100644 --- a/public/locales/pl-PL/grafana.json +++ b/public/locales/pl-PL/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Odmowa dostępu" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Niektóre funkcje są stabilne (GA) i domyślnie włączone, podczas gdy inne są obecnie we wstępnej fazie beta i dostępne do wczesnego wdrożenia.", - "confirm-modal-body-2": "Przed wprowadzeniem modyfikacji zalecamy przeanalizowanie konsekwencji każdej zmiany funkcji.", - "confirmText-save-changes": "Zapisz zmiany", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Ogólna dostępność", - "deprecated": "Wycofane", - "ga": "Ogólna dostępność" - }, - "save-changes": "Zapisz zmiany", - "saving": "Zapisywanie…", - "title-apply-feature-toggle-changes": "Zastosuj zmiany przełącznika funkcji" - }, "admin-orgs-table": { "aria-label-delete-org": "Usuń organizację", "confirmText-delete": "Usuń", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Sortowanie pulpitów według popularności w wynikach wyszukiwania", "title-team-sync": "Synchronizacja z zespołem" }, - "feature-toggles": { - "restart-pending": "Oczekiwanie na ponowne uruchomienie instancji Grafany w celu zastosowania najnowszych zmian przełączników funkcji", - "restart-required": "Zapisanie zmian przełączników funkcji spowoduje ponowne uruchomienie instancji, co może potrwać kilka minut", - "sub-title": "Przełączniki wyświetlania i edycji funkcji. 
Więcej informacji na temat przełączników funkcji można znaleźć na stronie <2>grafana.com." - }, "get-enterprise": { "contact-us": "Skontaktuj się z nami i skorzystaj z bezpłatnej wersji próbnej", "description": "Z wersji próbnej możesz korzystać za darmo przez 30 dni. O zbliżaniu się końca okresu próbnego przypomnimy Ci pięć dni przed jego zakończeniem.", @@ -2197,7 +2178,6 @@ "body-queries-expressions-configured": "Utwórz co najmniej jedno zapytanie lub wyrażenie, które będzie powodować wyświetlenie alertu", "confirmText-deactivate": "Wyłącz", "expressions": "Wyrażenia", - "loading-data-sources": "Wczytywanie źródeł danych…", "manipulate-returned-queries-other-operations": "Manipulowanie danymi zwracanymi z zapytań za pomocą działań matematycznych i innych operacji.", "message": { "a-valid-expression-is-required": "Wymagane jest prawidłowe wyrażenie" @@ -3284,7 +3264,7 @@ "define-allowed-teams-ids-label": "Zdefiniuj dozwolone identyfikatory zespołów", "display-name-description": "Nazwa będzie się wyświetlać na stronie logowania jako część tekstu „Zaloguj się za pomocą...”. 
To przydatne, jeśli korzystasz z więcej niż jednego dostawcy tożsamości lub protokołów SSO.", "display-name-label": "Nazwa wyświetlana", - "domain-hint-description": "Parametr wskazujący dziedzinę użytkownika w dzierżawie Azure AD/Entra ID i usprawniający proces logowania.", + "domain-hint-description": "", "domain-hint-label": "Wskazówka dotycząca domeny", "domain-hint-valid-domain": "To pole musi zawierać prawidłową domenę.", "email-attribute-name-description": "Nazwa klucza używanego do wyszukiwania adresu e-mail użytkownika w mapie atrybutów tokena OAuth2 ID.", @@ -3540,6 +3520,7 @@ "move-modal-field-label": "Nazwa folderu", "move-modal-text": "To działanie spowoduje przeniesienie następującej zawartości:", "move-modal-title": "Przenieś", + "move-provisioned-folder": "", "moving": "Przenoszenie…", "new-folder-name-required-phrase": "Wymagana jest nazwa folderu.", "selected-mix-resources-modal-text": "Wybrano zarówno zasoby udostępnione, jak i nieudostępnione. Nie można ich przetwarzać razem. Wybierz tylko zasoby udostępnione lub tylko zasoby nieudostępnione i spróbuj ponownie.", @@ -8762,6 +8743,9 @@ "tooltip-hide": "Ukryj hasło", "tooltip-show": "Pokaż hasło" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Rozwiń wiersz", "collapse": "Zwiń wiersz", @@ -8790,6 +8774,9 @@ "series-color-picker-popover": { "y-axis-usage": "Użyj prawej półosi OY" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Ładowanie" }, @@ -10248,7 +10235,7 @@ "title": "Źródła danych" }, "databases": { - "title": "Bazy danych" + "title": "" }, "datasources": { "subtitle": "Dodaj i skonfiguruj źródła danych", @@ -11792,6 +11779,8 @@ "button-cancelling": "", "button-next": "Zakończ", "button-start": "Rozpocznij synchronizację", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Spowoduje to usunięcie konfiguracji repozytorium i utracisz wszystkie postępy. 
Czy na pewno chcesz odrzucić zmiany?", "confirm": "Tak, odrzuć", @@ -12176,8 +12165,8 @@ "title": "Wybierz zakresy" }, "tree": { - "collapse": "Zwiń", - "expand": "Rozwiń", + "collapse": "", + "expand": "", "headline": { "noResults": "Nie znaleziono wyników dla tego zapytania", "recommended": "Zalecane", diff --git a/public/locales/pt-BR/grafana.json b/public/locales/pt-BR/grafana.json index e599999c67c..b3d1d198556 100644 --- a/public/locales/pt-BR/grafana.json +++ b/public/locales/pt-BR/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Acesso negado" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Alguns recursos estão estáveis (GA) e habilitados por padrão, enquanto outros estão atualmente em fase Beta preliminar, disponíveis para acesso antecipado.", - "confirm-modal-body-2": "Aconselhamos que você entenda o que cada mudança de recurso pode causar antes de fazer alterações.", - "confirmText-save-changes": "Salvar alterações", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Disponibilidade geral", - "deprecated": "Descontinuado", - "ga": "GA" - }, - "save-changes": "Salvar alterações", - "saving": "Salvando...", - "title-apply-feature-toggle-changes": "Aplicar alterações de alternância de recursos" - }, "admin-orgs-table": { "aria-label-delete-org": "Excluir org.", "confirmText-delete": "Excluir", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Classificar painéis por popularidade na pesquisa", "title-team-sync": "Sincronização de equipe" }, - "feature-toggles": { - "restart-pending": "Há uma reinicialização pendente para que sua instância da Grafana aplique as alterações mais recentes de alternância de recursos", - "restart-required": "Salvar as alterações de alternância de recursos solicitará a reinicialização da instância, o que pode levar alguns minutos", - "sub-title": "Visualize e edite feature toggles. Leia mais sobre feature toggles em <2>grafana.com." 
- }, "get-enterprise": { "contact-us": "Contate-nos e faça uma avaliação grátis", "description": "Você pode usar a versão de avaliação gratuitamente por 30 dias. Nós iremos te lembrar cinco dias antes do fim do período de avaliação.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Crie pelo menos uma consulta ou expressão para receber alertas", "confirmText-deactivate": "Desativar", "expressions": "Expressões", - "loading-data-sources": "Carregando fontes de dados…", "manipulate-returned-queries-other-operations": "Manipule os dados que retornaram das consultas usando operações matemáticas e outras operações.", "message": { "a-valid-expression-is-required": "Uma expressão válida é necessária" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Definir IDs das equipes permitidas", "display-name-description": "Será exibido na página de login como \"Entrar com...\". É uma opção conveniente caso você use mais de um provedor de identidade ou protocolos SSO.", "display-name-label": "Nome de exibição", - "domain-hint-description": "Parâmetro para indicar o âmbito do usuário no locatário do Azure AD/Entra ID e agilizar o processo de login.", + "domain-hint-description": "", "domain-hint-label": "Dica de domínio", "domain-hint-valid-domain": "Este campo deve ser um domínio válido.", "email-attribute-name-description": "Nome da chave a ser usada para a consulta de e-mail do usuário no mapa de atributos do token de ID OAuth2.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Nome da pasta", "move-modal-text": "Esta ação moverá o seguinte conteúdo:", "move-modal-title": "Mover", + "move-provisioned-folder": "", "moving": "Movendo...", "new-folder-name-required-phrase": "O nome da pasta é obrigatório.", "selected-mix-resources-modal-text": "Você selecionou recursos provisionados e não provisionados. Não éo possível processá-los juntos. 
Selecione apenas um dos tipos de recurso e tente novamente.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Ocultar senha", "tooltip-show": "Exibir senha" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Expandir linha", "collapse": "Recolher linha", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Usar o eixo y à direita" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Carregando" }, @@ -10186,7 +10173,7 @@ "title": "Fontes de dados" }, "databases": { - "title": "Bancos de dados" + "title": "" }, "datasources": { "subtitle": "Adicione e configure fontes de dados", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Finalizar", "button-start": "Iniciar sincronização", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Isso excluirá a configuração do repositório e você perderá todo o progresso. Tem certeza de que deseja descartar suas alterações?", "confirm": "Sim, descartar", @@ -12090,8 +12079,8 @@ "title": "Selecionar escopos" }, "tree": { - "collapse": "Recolher", - "expand": "Expandir", + "collapse": "", + "expand": "", "headline": { "noResults": "Nenhum resultado encontrado para sua consulta", "recommended": "Recomendado", diff --git a/public/locales/pt-PT/grafana.json b/public/locales/pt-PT/grafana.json index 71f6f957357..041324ee396 100644 --- a/public/locales/pt-PT/grafana.json +++ b/public/locales/pt-PT/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Acesso negado" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Algumas funcionalidades são estáveis (GA) e ativadas por predefinição, enquanto outras estão atualmente na sua fase Beta preliminar, disponíveis para utilizar antecipadamente.", - "confirm-modal-body-2": "Aconselhamos a compreensão das implicações de cada alteração de funcionalidade antes de efetuar modificações.", - "confirmText-save-changes": "Guardar alterações", - 
"get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Disponibilidade geral", - "deprecated": "Obsoleto", - "ga": "GA" - }, - "save-changes": "Guardar alterações", - "saving": "A guardar...", - "title-apply-feature-toggle-changes": "Aplicar as alterações da alternância de funcionalidades" - }, "admin-orgs-table": { "aria-label-delete-org": "Eliminar organização", "confirmText-delete": "Eliminar", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Ordenar painéis de controlo por popularidade na pesquisa", "title-team-sync": "Sincronização de equipa" }, - "feature-toggles": { - "restart-pending": "Está pendente um reinício para a sua instância Grafana aplicar as alterações de alternância de funcionalidades mais recentes", - "restart-required": "Guardar as alterações de alternância de funcionalidades solicitará uma reinicialização da instância, o que pode demorar alguns minutos", - "sub-title": "Ver e editar opções de funcionalidades. Leia mais sobre opções de funcionalidades em <2>grafana.com." - }, "get-enterprise": { "contact-us": "Contacte-nos e obtenha um teste gratuito", "description": "Pode utilizar a versão de teste gratuitamente durante 30 dias. Receberá um aviso cinco dias antes do final do período de teste.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Criar pelo menos uma consulta ou expressão para receber alertas de", "confirmText-deactivate": "Desativar", "expressions": "Expressões", - "loading-data-sources": "A carregar origens de dados...", "manipulate-returned-queries-other-operations": "Manipule os dados devolvidos das consultas com operações matemáticas e outras.", "message": { "a-valid-expression-is-required": "É necessária uma expressão válida" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Definir as ID das equipas permitidas", "display-name-description": "Será mostrado na página de início de sessão como \"Iniciar sessão com...\". 
Útil se utilizar mais de um fornecedor de identidade ou protocolos SSO.", "display-name-label": "Nome de exibição", - "domain-hint-description": "Parâmetro para indicar o domínio do utilizador no espaço do Azure AD/Entra ID e agilizar o processo de início de sessão.", + "domain-hint-description": "", "domain-hint-label": "Dica de domínio", "domain-hint-valid-domain": "Este campo deve ser um domínio válido.", "email-attribute-name-description": "Nome da chave a utilizar para a pesquisa de e-mail do utilizador no mapa de atributos do token de ID OAuth2.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Nome da pasta", "move-modal-text": "Esta ação moverá o seguinte conteúdo:", "move-modal-title": "Mover", + "move-provisioned-folder": "", "moving": "A mover...", "new-folder-name-required-phrase": "O nome da pasta é obrigatório.", "selected-mix-resources-modal-text": "Selecionou recursos provisionados e não provisionados. Estes não podem ser processados em conjunto. Selecione apenas recursos provisionados ou apenas recursos não provisionados e tente novamente.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Ocultar palavra-passe", "tooltip-show": "Mostrar palavra-passe" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Expandir linha", "collapse": "Recolher linha", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Utilizar o eixo y direito" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "A carregar" }, @@ -10186,7 +10173,7 @@ "title": "Origens de dados" }, "databases": { - "title": "Bases de dados" + "title": "" }, "datasources": { "subtitle": "Adicionar e configurar origens de dados", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Concluir", "button-start": "Iniciar sincronização", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Isto eliminará a configuração do repositório e todo o progresso será perdido. 
Tem a certeza de que pretende rejeitar as suas alterações?", "confirm": "Sim, rejeitar", @@ -12090,8 +12079,8 @@ "title": "Selecionar âmbitos" }, "tree": { - "collapse": "Recolher", - "expand": "Expandir", + "collapse": "", + "expand": "", "headline": { "noResults": "Não foram encontrados resultados para a sua consulta", "recommended": "Recomendado", diff --git a/public/locales/ru-RU/grafana.json b/public/locales/ru-RU/grafana.json index 28b1c4d15c1..2241b75dede 100644 --- a/public/locales/ru-RU/grafana.json +++ b/public/locales/ru-RU/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Доступ запрещен" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Одни функции работают в стабильном режиме (GA) и включены по умолчанию, а другие находятся на стадии предварительной бета-версии, доступной для раннего использования.", - "confirm-modal-body-2": "Прежде чем вносить изменения, рекомендуем ознакомиться с их последствиями для каждой функции.", - "confirmText-save-changes": "Сохранить изменения", - "get-stage-cell": { - "beta": "Бета", - "content-general-availability": "Общедоступность", - "deprecated": "Устаревший", - "ga": "GA" - }, - "save-changes": "Сохранить изменения", - "saving": "Сохранение...", - "title-apply-feature-toggle-changes": "Внесение изменений в переключатели функциональности" - }, "admin-orgs-table": { "aria-label-delete-org": "Удалить организацию", "confirmText-delete": "Удалить", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Сортировка дашбордов по популярности в поиске", "title-team-sync": "Синхронизация команд" }, - "feature-toggles": { - "restart-pending": "Чтобы применить последние изменения в переключателях функциональности, необходимо перезапустить ваш экземпляр Grafana", - "restart-required": "Сохранение изменений в переключателях функциональности приведет к перезапуску экземпляра, что может занять несколько минут", - "sub-title": "Просмотр и редактирование переключателей функций Подробнее о 
переключателях функций на сайте <2>grafana.com" - }, "get-enterprise": { "contact-us": "Свяжитесь с нами и получите бесплатную пробную версию", "description": "Вы можете бесплатно использовать пробную версию в течение 30 дней. Мы отправим вам напоминание за пять дней до окончания пробного периода.", @@ -2197,7 +2178,6 @@ "body-queries-expressions-configured": "Создайте хотя бы один запрос или выражение, на основе которых будет отправляться оповещение", "confirmText-deactivate": "Деактивировать", "expressions": "Выражения", - "loading-data-sources": "Загрузка источников данных...", "manipulate-returned-queries-other-operations": "Управляйте данными, возвращаемыми в результате запросов, с помощью математических и других операций.", "message": { "a-valid-expression-is-required": "Укажите допустимое выражение" @@ -3284,7 +3264,7 @@ "define-allowed-teams-ids-label": "Определить идентификаторы разрешенных команд", "display-name-description": "Будет отображаться на странице входа как «Войти с помощью...». Параметр полезен, если вы используете несколько поставщиков удостоверений или протоколов единого входа.", "display-name-label": "Отображаемое имя", - "domain-hint-description": "Параметр, указывающий область пользователя в клиенте Azure AD/Entra ID и упрощающий процесс входа.", + "domain-hint-description": "", "domain-hint-label": "Подсказка домена", "domain-hint-valid-domain": "В этом поле должен быть указан действительный домен.", "email-attribute-name-description": "Имя ключа, используемого для поиска адреса эл. 
почты пользователя в карте атрибутов идентификационного токена OAuth2.", @@ -3540,6 +3520,7 @@ "move-modal-field-label": "Имя папки", "move-modal-text": "Это действие приведет к перемещению следующего контента:", "move-modal-title": "Перемещение", + "move-provisioned-folder": "", "moving": "Перемещение...", "new-folder-name-required-phrase": "Необходимо указать имя папки.", "selected-mix-resources-modal-text": "Вы выбрали как подготовленные, так и неподготовленные ресурсы. Совместная обработка невозможна. Выберите только подготовленные или неподготовленные ресурсы и повторите попытку.", @@ -8762,6 +8743,9 @@ "tooltip-hide": "Скрыть пароль", "tooltip-show": "Показать пароль" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Развернуть строку", "collapse": "Свернуть строку", @@ -8790,6 +8774,9 @@ "series-color-picker-popover": { "y-axis-usage": "Использовать правую ось y" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Загрузка" }, @@ -10248,7 +10235,7 @@ "title": "Источники данных" }, "databases": { - "title": "Базы данных" + "title": "" }, "datasources": { "subtitle": "Добавление и настройка источников данных", @@ -11792,6 +11779,8 @@ "button-cancelling": "", "button-next": "Готово", "button-start": "Начать синхронизацию", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Конфигурация репозитория будет удалена, и вы потеряете весь прогресс. 
Действительно отменить изменения?", "confirm": "Да, отменить", @@ -12176,8 +12165,8 @@ "title": "Выбор областей" }, "tree": { - "collapse": "Свернуть", - "expand": "Развернуть", + "collapse": "", + "expand": "", "headline": { "noResults": "По вашему запросу ничего не найдено", "recommended": "Рекомендуемые", diff --git a/public/locales/sv-SE/grafana.json b/public/locales/sv-SE/grafana.json index 114f057d2c8..ac34fbbf4e8 100644 --- a/public/locales/sv-SE/grafana.json +++ b/public/locales/sv-SE/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Åtkomst nekad" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Vissa funktioner är stabila (GA) och aktiverade som standard, medan vissa för närvarande är i sin preliminära betafas och finns tillgängliga för tidig tillämpning.", - "confirm-modal-body-2": "Vi rekommenderar att du ser till att du förstår konsekvenserna av varje funktionsändring innan du gör några ändringar.", - "confirmText-save-changes": "Spara ändringarna", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Allmän tillgänglighet", - "deprecated": "Föråldrat", - "ga": "GA" - }, - "save-changes": "Spara ändringarna", - "saving": "Sparar ...", - "title-apply-feature-toggle-changes": "Tillämpa ändringarna för funktionsväxlare" - }, "admin-orgs-table": { "aria-label-delete-org": "Radera org", "confirmText-delete": "Radera", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Sortera instrumentpaneler efter popularitet vid sökningar", "title-team-sync": "Teamsynkronisering" }, - "feature-toggles": { - "restart-pending": "En omstart väntar för din Grafana-instans för att tillämpa de senaste ändringarna för växling av funktion", - "restart-required": "Om du sparar ändringar av funktionen kommer en omstart av instansen att uppmanas, vilket kan ta några minuter", - "sub-title": "Visa och redigera funktionsväxlare. Läs mer om funktionsväxlare på <2>grafana.com." 
- }, "get-enterprise": { "contact-us": "Kontakta oss och få en gratis provperiod", "description": "Du kan använda provversionen gratis i 30 dagar. Du får en påminnelse av oss fem dagar innan provperioden löper ut.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Skapa minst en fråga eller ett uttryck att bli larmad för", "confirmText-deactivate": "Inaktivera", "expressions": "Uttryck", - "loading-data-sources": "Laddar datakällor …", "manipulate-returned-queries-other-operations": "Manipulera data som returneras från frågor med matematikfunktioner och andra operationer.", "message": { "a-valid-expression-is-required": "Ett giltigt uttryck krävs" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "Definiera tillåtna team-ID", "display-name-description": "Kommer att visas på inloggningssidan som ”Logga in med …” Användbart om du använder fler än en identitetsleverantör eller SSO-protokoll.", "display-name-label": "Visningsnamn", - "domain-hint-description": "Parameter som anger användarens domän i Azure AD/Entra ID-klienten och effektiviserar inloggningsprocessen.", + "domain-hint-description": "", "domain-hint-label": "Domäntips", "domain-hint-valid-domain": "Detta fält måste vara en giltig domän.", "email-attribute-name-description": "Namn på nyckeln som ska användas för användarens e-postsökning inom attributkartan för OAuth2 ID-token.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Mappnamn", "move-modal-text": "Denna åtgärd flyttar följande innehåll:", "move-modal-title": "Flytta", + "move-provisioned-folder": "", "moving": "Flyttar ...", "new-folder-name-required-phrase": "Mappnamn krävs.", "selected-mix-resources-modal-text": "Du har valt både provisionerade och icke-provisionerade resurser. Dessa kan inte hanteras tillsammans. 
Välj enbart provisionerade eller enbart icke-provisionerade resurser och försök igen.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Dölj lösenord", "tooltip-show": "Visa lösenord" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Expandera rad", "collapse": "Dölj rad", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Använd höger y-axel" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Laddar" }, @@ -10186,7 +10173,7 @@ "title": "Datakällor" }, "databases": { - "title": "Databaser" + "title": "" }, "datasources": { "subtitle": "Lägg till och konfigurera datakällor", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Slutför", "button-start": "Påbörja synkronisering", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Detta kommer att radera lagringsplatskonfigurationen och du kommer att förlora alla ändringar. Är du säker på att du vill kassera dina ändringar?", "confirm": "Ja, kassera", @@ -12090,8 +12079,8 @@ "title": "Välj omfattningar" }, "tree": { - "collapse": "Minimera", - "expand": "Expandera", + "collapse": "", + "expand": "", "headline": { "noResults": "Inga resultat hittades för din fråga", "recommended": "Rekommenderas", diff --git a/public/locales/tr-TR/grafana.json b/public/locales/tr-TR/grafana.json index 198a6c03bdf..d0f88d3b83e 100644 --- a/public/locales/tr-TR/grafana.json +++ b/public/locales/tr-TR/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "Erişim reddedildi" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "Bazı özellikler kararlı sürümdedir (Genel Kullanım/GA) ve varsayılan olarak etkindir, bazıları ise henüz ön aşamada olan Beta aşamasındadır ve erken benimseme için kullanılabilir durumdadır.", - "confirm-modal-body-2": "Özelliklerde değişiklik yapmadan önce etkilerini gözden geçirmenizi öneririz.", - "confirmText-save-changes": "Değişiklikleri kaydet", - 
"get-stage-cell": { - "beta": "Beta", - "content-general-availability": "Genel kullanılabilirlik", - "deprecated": "Kullanımdan kaldırıldı", - "ga": "Genel kullanıma açıldı (GA)" - }, - "save-changes": "Değişiklikleri kaydet", - "saving": "Kaydediliyor…", - "title-apply-feature-toggle-changes": "Özellik geçişi değişikliklerini uygula" - }, "admin-orgs-table": { "aria-label-delete-org": "Kuruluşu sil", "confirmText-delete": "Sil", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "Aramada panoları popülerliğe göre sırala", "title-team-sync": "Ekip Senkronizasyonu" }, - "feature-toggles": { - "restart-pending": "Grafana örneğinizde en son özellik değişikliklerini uygulamak için yeniden başlatma bekleniyor", - "restart-required": "Özellik geçişi değişikliklerini kaydetmek örneğin yeniden başlatılmasını gerektirecek, bu işlem birkaç dakika sürebilir", - "sub-title": "Özellik anahtarlarını görüntüleyin ve düzenleyin. Özellik anahtarları hakkında daha fazla bilgi için <2>grafana.com adresini ziyaret edin." - }, "get-enterprise": { "contact-us": "Bizimle iletişime geçerek ücretsiz deneme sürümünü edinin.", "description": "30 gün boyunca deneme sürümünü ücretsiz olarak kullanabilirsiniz. Deneme süresi bitmeden beş gün önce size hatırlatma yapacağız.", @@ -2181,7 +2162,6 @@ "body-queries-expressions-configured": "Uyarı oluşturabilmek için en az bir sorgu veya ifade oluşturun", "confirmText-deactivate": "Devre dışı bırak", "expressions": "İfadeler", - "loading-data-sources": "Veri kaynakları yükleniyor...", "manipulate-returned-queries-other-operations": "Sorgulardan dönen verileri matematik ve diğer işlemlerle düzenleyin.", "message": { "a-valid-expression-is-required": "Geçerli bir ifade gereklidir" @@ -3260,7 +3240,7 @@ "define-allowed-teams-ids-label": "İzin verilen ekip kimliklerini tanımlayın", "display-name-description": "Giriş sayfasında \"Şununla giriş yap:\" olarak gösterilecektir. 
Birden fazla kimlik sağlayıcı veya SSO protokolü kullanıyorsanız faydalıdır.", "display-name-label": "Görünen ad", - "domain-hint-description": "Azure AD/Entra ID kiracısında kullanıcının alanını belirtmek ve oturum açma işlemini kolaylaştırmak için parametre.", + "domain-hint-description": "", "domain-hint-label": "Alan adı ipucu", "domain-hint-valid-domain": "Bu alan geçerli bir alan adı olmalıdır.", "email-attribute-name-description": "OAuth2 kimlik belirteci öznitelik eşlemesinde kullanıcı e-postasını aramak için kullanılacak anahtarın adı.", @@ -3516,6 +3496,7 @@ "move-modal-field-label": "Klasör adı", "move-modal-text": "Bu işlem aşağıdaki içerikleri taşıyacaktır:", "move-modal-title": "Taşı", + "move-provisioned-folder": "", "moving": "Taşınıyor...", "new-folder-name-required-phrase": "Klasör adı gereklidir.", "selected-mix-resources-modal-text": "Hem sağlanmış hem de sağlanmamış kaynakları seçtiniz. Bunlar birlikte işlenemez. Lütfen yalnızca sağlanmış kaynakları veya yalnızca sağlanmamış kaynakları seçin ve tekrar deneyin.", @@ -8716,6 +8697,9 @@ "tooltip-hide": "Parolayı gizle", "tooltip-show": "Parolayı göster" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "Satırı genişlet", "collapse": "Satırı daralt", @@ -8744,6 +8728,9 @@ "series-color-picker-popover": { "y-axis-usage": "Sağ Y eksenini kullan" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "Yükleniyor" }, @@ -10186,7 +10173,7 @@ "title": "Veri kaynakları" }, "databases": { - "title": "Veri tabanları " + "title": "" }, "datasources": { "subtitle": "Veri kaynakları ekleyin ve yapılandırın", @@ -11712,6 +11699,8 @@ "button-cancelling": "", "button-next": "Sonlandır", "button-start": "Senkronizasyonu başlat", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "Bu işlem, depo yapılandırmasını silecek ve tüm ilerlemeyi kaybedeceksiniz. 
Değişikliklerinizi silmek istediğinizden emin misiniz?", "confirm": "Evet, yoksay", @@ -12090,8 +12079,8 @@ "title": "Kapsam seçin" }, "tree": { - "collapse": "Daralt", - "expand": "Genişlet", + "collapse": "", + "expand": "", "headline": { "noResults": "Sorgunuz için sonuç bulunamadı", "recommended": "Önerilen", diff --git a/public/locales/zh-Hans/grafana.json b/public/locales/zh-Hans/grafana.json index a811f038adf..42311600f4c 100644 --- a/public/locales/zh-Hans/grafana.json +++ b/public/locales/zh-Hans/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "访问被拒绝" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "有些功能可以稳定运行(GA 正式版)并默认启用,而有些功能目前处于初步的 Beta 测试阶段,可供用户提前试用。", - "confirm-modal-body-2": "我们建议在修改之前了解每个功能更改所带来的影响。", - "confirmText-save-changes": "保存更改", - "get-stage-cell": { - "beta": "Beta 测试版", - "content-general-availability": "正式发布", - "deprecated": "已弃用", - "ga": "GA" - }, - "save-changes": "保存更改", - "saving": "正在保存...", - "title-apply-feature-toggle-changes": "应用功能切换更改" - }, "admin-orgs-table": { "aria-label-delete-org": "删除组织", "confirmText-delete": "删除", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "按搜索中的受欢迎程度对数据面板排序", "title-team-sync": "团队同步" }, - "feature-toggles": { - "restart-pending": "您的 Grafana 实例正在等待重新启动,以应用最新的功能切换更改", - "restart-required": "保存功能切换更改将提示重新启动实例,这可能需要几分钟的时间", - "sub-title": "查看和编辑功能切换。在 <2>grafana.com 上阅读有关功能切换的更多信息。" - }, "get-enterprise": { "contact-us": "联系我们并获得免费试用", "description": "您可以免费使用试用版 30 天。距离试用期结束还剩五天的时候,我们会提醒您。", @@ -2173,7 +2154,6 @@ "body-queries-expressions-configured": "至少创建一个查询或表达式以发出警报", "confirmText-deactivate": "停用", "expressions": "表达式", - "loading-data-sources": "正在加载数据源...", "manipulate-returned-queries-other-operations": "使用数学和其他操作处理查询返回的数据。", "message": { "a-valid-expression-is-required": "有效表达式为必填项" @@ -3248,7 +3228,7 @@ "define-allowed-teams-ids-label": "定义允许的团队 ID", "display-name-description": "将在登录页面上显示为“使用... 
登录”。如果您使用多个身份提供程序或 SSO 协议,这将很有帮助。", "display-name-label": "显示名称", - "domain-hint-description": "用于指示 Azure AD/Entra ID 租户中用户领域并简化登录流程的参数。", + "domain-hint-description": "", "domain-hint-label": "域提示", "domain-hint-valid-domain": "此字段必须是有效的域。", "email-attribute-name-description": "用于在 OAuth2 ID 令牌的属性映射中查找用户电子邮箱的密钥的名称。", @@ -3504,6 +3484,7 @@ "move-modal-field-label": "文件夹名称", "move-modal-text": "此操作将移动以下内容:", "move-modal-title": "移动", + "move-provisioned-folder": "", "moving": "正在移动...", "new-folder-name-required-phrase": "文件夹名称是必需项。", "selected-mix-resources-modal-text": "您已选择了已预置和未预置的资源。这些资源无法一起处理。请仅选择已预置的资源或未预置的资源,然后重试。", @@ -8693,6 +8674,9 @@ "tooltip-hide": "隐藏密码", "tooltip-show": "显示密码" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { "aria-label-expand": "展开行", "collapse": "折叠行", @@ -8721,6 +8705,9 @@ "series-color-picker-popover": { "y-axis-usage": "使用右侧 Y 轴" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "正在加载" }, @@ -10155,7 +10142,7 @@ "title": "数据源" }, "databases": { - "title": "数据库" + "title": "" }, "datasources": { "subtitle": "添加并配置数据源", @@ -11672,6 +11659,8 @@ "button-cancelling": "", "button-next": "完成", "button-start": "开始同步", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "此操作将删除存储库配置,您将丢失所有进度。您确定要丢弃更改吗?", "confirm": "是,丢弃", @@ -12047,8 +12036,8 @@ "title": "选择范围" }, "tree": { - "collapse": "收起", - "expand": "展开", + "collapse": "", + "expand": "", "headline": { "noResults": "未找到与您的查询相关的结果", "recommended": "推荐", diff --git a/public/locales/zh-Hant/grafana.json b/public/locales/zh-Hant/grafana.json index 1779c029c0c..ed4b4e68ad0 100644 --- a/public/locales/zh-Hant/grafana.json +++ b/public/locales/zh-Hant/grafana.json @@ -81,20 +81,6 @@ "title-access-denied": "拒絕存取" } }, - "admin-feature-toggles-table": { - "confirm-modal-body-1": "部分功能穩定 (GA) 且預設為啟用,有些功能目前則處於初步 Beta 階段,可供早期採用。", - "confirm-modal-body-2": "建議您在進行修改前了解各項功能變更的影響。", - 
"confirmText-save-changes": "儲存變更", - "get-stage-cell": { - "beta": "Beta", - "content-general-availability": "一般可用性", - "deprecated": "已取代", - "ga": "GA" - }, - "save-changes": "儲存變更", - "saving": "正在儲存…", - "title-apply-feature-toggle-changes": "套用功能切換變更" - }, "admin-orgs-table": { "aria-label-delete-org": "刪除組織", "confirmText-delete": "刪除", @@ -140,11 +126,6 @@ "title-sort-dashboards-by-popularity-in-search": "依搜尋中的熱門程度將儀表板排序", "title-team-sync": "團隊同步" }, - "feature-toggles": { - "restart-pending": "您的 Grafana 執行個體正在等待重新啟動,以套用最新的功能切換變更", - "restart-required": "儲存功能切換變更將提示重新啟動執行個體,這可能需要幾分鐘的時間", - "sub-title": "檢視和編輯功能切換。請至 <2>grafana.com 進一步閱讀關於功能切換的資訊。" - }, "get-enterprise": { "contact-us": "聯絡我們,取得免費試用", "description": "您可以免費使用試用版 30 天。我們會在試用期結束前五天提醒您。", @@ -2173,7 +2154,6 @@ "body-queries-expressions-configured": "建立至少一個查詢或表達式以便收到警報", "confirmText-deactivate": "停用", "expressions": "表達式", - "loading-data-sources": "正在載入資料來源…", "manipulate-returned-queries-other-operations": "使用數學和其他運算來處理查詢返回的資料。", "message": { "a-valid-expression-is-required": "必須輸入有效的表達式" @@ -3248,7 +3228,7 @@ "define-allowed-teams-ids-label": "定義允許的團隊 ID", "display-name-description": "將在登入頁面上顯示為「使用...登入」。如果您使用多個身分提供者或 SSO 協定,這項功能會很有幫助。", "display-name-label": "顯示名稱", - "domain-hint-description": "用於指示 Azure AD/Entra ID 租用戶中使用者領域的參數,並簡化登入流程。", + "domain-hint-description": "", "domain-hint-label": "網域提示", "domain-hint-valid-domain": "此欄位必須為有效的網域。", "email-attribute-name-description": "設定用來在 OAuth2 ID 權杖的屬性對應中查找使用者電子郵件的金鑰名稱。", @@ -3504,6 +3484,7 @@ "move-modal-field-label": "資料夾名稱", "move-modal-text": "此動作將移動以下內容:", "move-modal-title": "移動", + "move-provisioned-folder": "", "moving": "正在移動…", "new-folder-name-required-phrase": "必須要有資料夾名稱。", "selected-mix-resources-modal-text": "您已選取已佈建和非佈建資源。這些資源無法一起處理。請僅選取已佈建的資源或僅選取非佈建資源,然後再試一次。", @@ -8693,6 +8674,9 @@ "tooltip-hide": "隱藏密碼", "tooltip-show": "顯示密碼" }, + "range-slider": { + "drag-handle-aria-label": "" + }, "row-expander": { 
"aria-label-expand": "展開列", "collapse": "收闔列", @@ -8721,6 +8705,9 @@ "series-color-picker-popover": { "y-axis-usage": "使用右 y 軸" }, + "slider": { + "drag-handle-aria-label": "" + }, "spinner": { "aria-label": "正在載入" }, @@ -10155,7 +10142,7 @@ "title": "資料來源" }, "databases": { - "title": "資料庫" + "title": "" }, "datasources": { "subtitle": "新增並設定資料來源", @@ -11672,6 +11659,8 @@ "button-cancelling": "", "button-next": "結束", "button-start": "開始同步處理", + "check-status-button": "", + "check-status-message": "", "discard-modal": { "body": "這將刪除儲存庫設定,您將失去所有進度。確定要捨棄變更嗎?", "confirm": "是的,捨棄", @@ -12047,8 +12036,8 @@ "title": "選取範圍" }, "tree": { - "collapse": "收闔", - "expand": "展開", + "collapse": "", + "expand": "", "headline": { "noResults": "未找到您的查詢結果", "recommended": "建議", diff --git a/yarn.lock b/yarn.lock index 4df524d5425..3f6986db4f0 100644 --- a/yarn.lock +++ b/yarn.lock @@ -30532,8 +30532,8 @@ __metadata: linkType: hard "tar-fs@npm:^3.1.0": - version: 3.1.0 - resolution: "tar-fs@npm:3.1.0" + version: 3.1.1 + resolution: "tar-fs@npm:3.1.1" dependencies: bare-fs: "npm:^4.0.1" bare-path: "npm:^3.0.0" @@ -30544,7 +30544,7 @@ __metadata: optional: true bare-path: optional: true - checksum: 10/272054aa93adf66f3febac1ab666c2e1ae80fb7d9b7483efa9e9216aca6f7900dfecb9ef04e83f179251f95a965f960d760c42ebbb1e04580a182da997af98da + checksum: 10/f7f7540b563e10541dc0b95f710c68fc1fccde0c1177b4d3bab2023c6d18da19d941a8697fdc1abff54914b71b6e5f2dfb0455572b5c8993b2ab76571cbbc923 languageName: node linkType: hard