mirror of https://github.com/grafana/grafana.git
Compare commits
15 Commits
9c18fb854a
...
5e112ec8fa
Author | SHA1 | Date |
---|---|---|
|
5e112ec8fa | |
|
70dc9a0027 | |
|
9d60d03d11 | |
|
725a91e9eb | |
|
b7c73a9bfc | |
|
c3f34efb41 | |
|
84a2f41016 | |
|
7fbd5441bf | |
|
b579efb695 | |
|
f06a12717b | |
|
ed2cecf36e | |
|
5438df01a1 | |
|
6f70cf5e00 | |
|
7d1c6b6bd2 | |
|
6605746227 |
|
@ -1058,6 +1058,10 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
|
|||
/scripts/codemods/explicit-barrel-imports.cjs @grafana/frontend-ops
|
||||
/scripts/rtk-client-generator/ @grafana/grafana-search-navigate-organise
|
||||
|
||||
/scripts/codeowners-manifest/ @grafana/dataviz-squad
|
||||
/scripts/test-coverage-by-codeowner.js @grafana/dataviz-squad
|
||||
/jest.config.codeowner.js @grafana/dataviz-squad
|
||||
|
||||
/scripts/**/generate-transformations* @grafana/datapro
|
||||
/scripts/webpack/ @grafana/frontend-ops
|
||||
/scripts/generate-a11y-report.sh @grafana/grafana-frontend-platform
|
||||
|
|
|
@ -253,3 +253,5 @@ public/mockServiceWorker.js
|
|||
|
||||
# Ignore unified storage kv store files
|
||||
/grafana-kv-data
|
||||
|
||||
/codeowners-manifest/
|
||||
|
|
|
@ -14,6 +14,9 @@ public/sass/*.generated.scss
|
|||
scripts/grafana-server/tmp
|
||||
vendor
|
||||
|
||||
/coverage
|
||||
/codeowners-manifest
|
||||
|
||||
# TS generate from cue by cuetsy
|
||||
**/*.gen.ts
|
||||
|
||||
|
|
|
@ -201,7 +201,7 @@ require (
|
|||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20251007160934-e642236ea9eb // indirect
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
|
||||
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
|
|
|
@ -721,8 +721,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
|||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 h1:wfehM99Xlpltl9MQx8SITkgFgHmPGqrXoBCVLk/Q6NA=
|
||||
github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
|
||||
github.com/grafana/alerting v0.0.0-20251007160934-e642236ea9eb h1:ejpL3pI9t1ebEFtxaP7TMDKZagCu1nSq1O8op+sO4DY=
|
||||
github.com/grafana/alerting v0.0.0-20251007160934-e642236ea9eb/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
|
||||
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
|
||||
|
|
|
@ -12,9 +12,14 @@ labels:
|
|||
- enterprise
|
||||
- oss
|
||||
menuTitle: Email
|
||||
title: Configure email for Alerting
|
||||
title: Configure email for alert notifications
|
||||
weight: 110
|
||||
refs:
|
||||
configure-contact-points:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/configure-notifications/manage-contact-points/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/manage-contact-points/
|
||||
notification-templates:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/configure-notifications/template-notifications/
|
||||
|
@ -22,83 +27,70 @@ refs:
|
|||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/template-notifications/
|
||||
---
|
||||
|
||||
# Configure email for Alerting
|
||||
# Configure email for alert notifications
|
||||
|
||||
Use the Grafana Alerting - email integration to send email notifications when your alerts are firing. An email is sent when an alert fires and when an alert gets resolved.
|
||||
Use the email integration to send alert notifications to one or more addresses.
|
||||
|
||||
Note that you can customize the `subject` and `message` of the email using [notification templates](ref:notification-templates). However, you cannot add HTML and CSS to email notifications for visual changes.
|
||||
You can customize the [subject and main section of the email body](#optional-settings-using-templates). By default, the subject and body are generated from the alert data included in the notification.
|
||||
|
||||
## Before you begin
|
||||
|
||||
In Grafana OSS, you must configure SMTP settings before you can enable email notifications.
|
||||
|
||||
{{<admonition type="note">}}
|
||||
This section is for Grafana OSS only. For Grafana Cloud, SMTP configuration is not required.
|
||||
In Grafana Cloud, SMTP configuration is not required.
|
||||
{{</admonition>}}
|
||||
|
||||
For Grafana OSS, you enable email notifications by first configuring [SMTP settings](https://grafana.com/docs/grafana/next/setup-grafana/configure-grafana/#smtp) in the Grafana configuration settings.
|
||||
1. Open the [configuration file](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/). The file is typically named `grafana.ini` or `custom.ini` and located in the `conf` directory of your Grafana installation.
|
||||
|
||||
### SMTP configuration
|
||||
1. Configure the [SMTP settings](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#smtp) for your email server in the `[smtp]` section.
|
||||
|
||||
1. Access the configuration file.
|
||||
1. Save your changes and restart Grafana.
|
||||
|
||||
Locate the Grafana configuration file. This file is typically named `grafana.ini` or `custom.ini` and is located in the `conf` directory within the Grafana installation directory.
|
||||
1. Test email notifications by creating a contact point.
|
||||
|
||||
1. Open the configuration file:
|
||||
## Configure Email for a contact point
|
||||
|
||||
Open the configuration file using a text editor.
|
||||
|
||||
1. Locate SMTP settings section.
|
||||
|
||||
Search for the [SMTP settings section](https://grafana.com/docs/grafana/next/setup-grafana/configure-grafana/#smtp) in the configuration file. It starts with `[smtp]`.
|
||||
|
||||
1. Configure SMTP settings.
|
||||
|
||||
Within the `[smtp]` settings section, specify the following parameters:
|
||||
- `enabled = true`: Enables SMTP.
|
||||
- `host`: The hostname or IP address of your SMTP server, and the port number of your SMTP server (commonly 25, 465, or 587). Default is `localhost:25`.
|
||||
- `user`: Your SMTP username (if authentication is required).
|
||||
- `password`: Your SMTP password (if authentication is required).
|
||||
- `from_address`: The email address from which Grafana notifications will be sent.
|
||||
- `from_name`: The name associated with the from_address.
|
||||
- `skip_verify = true`: Skip SSL/TLS certificate verification (useful for testing, but not recommended for production).
|
||||
|
||||
1. Save and close the configuration file.
|
||||
|
||||
After configuring the SMTP settings, save the changes to the configuration file and close the text editor.
|
||||
|
||||
1. Restart Grafana.
|
||||
|
||||
Restart the Grafana service to apply the changes made to the configuration file. The method for restarting Grafana depends on your operating system and how Grafana was installed (e.g., `systemctl restart grafana-server` for systems using systemd).
|
||||
|
||||
1. Test email notifications.
|
||||
|
||||
After restarting Grafana, test the email notification functionality by creating an email contact point.
|
||||
|
||||
## Procedure
|
||||
|
||||
To set up email integration, complete the following steps.
|
||||
To create a contact point with an email integration, complete the following steps.
|
||||
|
||||
1. Navigate to **Alerts & IRM** -> **Alerting** -> **Contact points**.
|
||||
1. Click **+ Add contact point**.
|
||||
1. Enter a contact point name.
|
||||
1. From the Integration list, select **Email**.
|
||||
1. Enter the email addresses you want to send notifications to.
|
||||
|
||||
Email addresses are case sensitive. Ensure that the email address entered is correct.
|
||||
For large numbers of emails, the **Single email** checkbox in **Optional Email Settings** allows you to send a single email to all recipients rather than sending individual emails to each address in the contact point.
|
||||
|
||||
1. For Grafana Alertmanager: click **Test** to check that your integration works.
|
||||
|
||||
1. Click **+ Create contact point**.
|
||||
1. Enter a name for the contact point.
|
||||
1. From the **Integration** list, select **Email**.
|
||||
1. Set up the required [settings](#email-settings) for your Email configuration.
|
||||
1. Click **Save contact point**.
|
||||
|
||||
## Next steps
|
||||
For more details on contact points, including how to test them and enable notifications, refer to [Configure contact points](ref:configure-contact-points).
|
||||
|
||||
The email contact point is ready to receive alert notifications.
|
||||
## Email settings
|
||||
|
||||
To add this contact point to your alert, complete the following steps.
|
||||
| Option | Description |
|
||||
| --------- | ------------------------------------------------------------------------------------------ |
|
||||
| Addresses | The list of email addresses to send the notifications. Email addresses are case sensitive. |
|
||||
|
||||
1. In Grafana, navigate to **Alerting** > **Alert rules**.
|
||||
1. Edit or create a new alert rule.
|
||||
1. Scroll down to the **Configure labels and notifications** section.
|
||||
1. Under Notifications click **Select contact point**.
|
||||
1. From the drop-down menu, select the previously created contact point.
|
||||
1. Click **Save rule and exit**.
|
||||
#### Optional settings
|
||||
|
||||
| Option | Description |
|
||||
| ------------ | ------------------------------------------------------------------------- |
|
||||
| Single email | Send a single email to all email addresses rather than individual emails. |
|
||||
|
||||
#### Optional settings using templates
|
||||
|
||||
{{<admonition type="note">}}
|
||||
You can customize the email subject and main section of the email body, but you can't edit HTML or CSS for visual changes.
|
||||
|
||||
In Grafana OSS and Enterprise, you can edit the full email template. However, this is not officially supported because it's an internal API that may change without prior notice.
|
||||
{{</admonition>}}
|
||||
|
||||
| Option | Description |
|
||||
| ------- | --------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Subject | Sets the email subject, replacing the default template. Supports [notification templates](ref:notification-templates). |
|
||||
| Message | Sets the main section of the email body, replacing the default template. Supports [notification templates](ref:notification-templates). |
|
||||
|
||||
{{< figure src="/media/docs/alerting/custom-email-message5.png" caption="Email notification with custom message." max-width="750px" >}}
|
||||
|
||||
#### Optional notification settings
|
||||
|
||||
| Option | Description |
|
||||
| ------------------------ | ------------------------------------------------------------------- |
|
||||
| Disable resolved message | Enable this option to prevent notifications when an alert resolves. |
|
||||
|
|
|
@ -55,322 +55,74 @@ refs:
|
|||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
configure-cloudwatch:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
cloudwatch-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
cloudwatch-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
cloudwatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
query-caching:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/variables/
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/annotate-visualizations/
|
||||
set-up-grafana-monitoring:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/set-up-grafana-monitoring/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/set-up-grafana-monitoring/
|
||||
transformations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/transform-data/
|
||||
visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
|
||||
---
|
||||
|
||||
# Amazon CloudWatch data source
|
||||
|
||||
Grafana ships with built-in support for Amazon CloudWatch.
|
||||
This topic describes queries, templates, variables, and other configuration specific to the CloudWatch data source.
|
||||
Amazon CloudWatch is the AWS native monitoring and observability service that collects, aggregates, and stores metrics, logs, and events from AWS resources, applications, and services. CloudWatch enables you to visualize performance data, track system health, and set up automated alerts based on defined thresholds. The Amazon CloudWatch data source in Grafana extends these capabilities by allowing you to query CloudWatch data and create rich, interactive visualizations that can be correlated with data from other systems within unified dashboards.
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
|
||||
Only users with the organization administrator role can add data sources.
|
||||
Administrators can also [provision the data source](#provision-the-data-source) with Grafana's provisioning system, and should [control pricing](#control-pricing) and [manage service quotas](#manage-service-quotas) accordingly.
|
||||
Grafana includes native support for the Amazon CloudWatch plugin, so there's no need to install a plugin.
|
||||
|
||||
Once you've added the data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
|
||||
The following documents will help you get started working with the CloudWatch data source:
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To troubleshoot issues while setting up the CloudWatch data source, check the `/var/log/grafana/grafana.log` file.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure the data source
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under Your connections, click **Data sources**.
|
||||
1. Enter `CloudWatch` in the search bar.
|
||||
1. Click **CloudWatch**.
|
||||
|
||||
The **Settings** tab of the data source is displayed.
|
||||
|
||||
### Configure AWS authentication
|
||||
|
||||
A Grafana plugin's requests to AWS are made on behalf of an AWS Identity and Access Management (IAM) role or IAM user.
|
||||
The IAM user or IAM role must have the associated policies to perform certain API actions.
|
||||
|
||||
For authentication options and configuration details, refer to [AWS authentication](aws-authentication/).
|
||||
|
||||
#### IAM policy examples
|
||||
|
||||
To read CloudWatch metrics and EC2 tags, instances, regions, and alarms, you must grant Grafana permissions via IAM.
|
||||
You can attach these permissions to the IAM role or IAM user you configured in [AWS authentication](aws-authentication/).
|
||||
|
||||
##### Metrics-only permissions
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "AllowReadingMetricsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"cloudwatch:DescribeAlarmsForMetric",
|
||||
"cloudwatch:DescribeAlarmHistory",
|
||||
"cloudwatch:DescribeAlarms",
|
||||
"cloudwatch:ListMetrics",
|
||||
"cloudwatch:GetMetricData",
|
||||
"cloudwatch:GetInsightRuleReport"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingTagsInstancesRegionsFromEC2",
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:DescribeTags", "ec2:DescribeInstances", "ec2:DescribeRegions"],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourcesForTags",
|
||||
"Effect": "Allow",
|
||||
"Action": "tag:GetResources",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourceMetricsFromPerformanceInsights",
|
||||
"Effect": "Allow",
|
||||
"Action": "pi:GetResourceMetrics",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
##### Logs-only permissions
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "AllowReadingLogsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"logs:DescribeLogGroups",
|
||||
"logs:GetLogGroupFields",
|
||||
"logs:StartQuery",
|
||||
"logs:StopQuery",
|
||||
"logs:GetQueryResults",
|
||||
"logs:GetLogEvents"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingTagsInstancesRegionsFromEC2",
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:DescribeTags", "ec2:DescribeInstances", "ec2:DescribeRegions"],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourcesForTags",
|
||||
"Effect": "Allow",
|
||||
"Action": "tag:GetResources",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
##### Metrics and logs permissions
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "AllowReadingMetricsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"cloudwatch:DescribeAlarmsForMetric",
|
||||
"cloudwatch:DescribeAlarmHistory",
|
||||
"cloudwatch:DescribeAlarms",
|
||||
"cloudwatch:ListMetrics",
|
||||
"cloudwatch:GetMetricData",
|
||||
"cloudwatch:GetInsightRuleReport"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourceMetricsFromPerformanceInsights",
|
||||
"Effect": "Allow",
|
||||
"Action": "pi:GetResourceMetrics",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingLogsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"logs:DescribeLogGroups",
|
||||
"logs:GetLogGroupFields",
|
||||
"logs:StartQuery",
|
||||
"logs:StopQuery",
|
||||
"logs:GetQueryResults",
|
||||
"logs:GetLogEvents"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingTagsInstancesRegionsFromEC2",
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:DescribeTags", "ec2:DescribeInstances", "ec2:DescribeRegions"],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourcesForTags",
|
||||
"Effect": "Allow",
|
||||
"Action": "tag:GetResources",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
##### Cross-account observability permissions
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": ["oam:ListSinks", "oam:ListAttachedLinks"],
|
||||
"Effect": "Allow",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Cross-account observability lets you retrieve metrics and logs across different accounts in a single region, but you can't query EC2 Instance Attributes across accounts because those come from the EC2 API and not the CloudWatch API.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Configure CloudWatch settings
|
||||
|
||||
#### Namespaces of Custom Metrics
|
||||
|
||||
Grafana can't load custom namespaces through the CloudWatch [GetMetricData API](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html).
|
||||
|
||||
To make custom metrics appear in the data source's query editor fields, specify the names of the namespaces containing the custom metrics in the data source configuration's _Namespaces of Custom Metrics_ field.
|
||||
The field accepts multiple namespaces separated by commas.
|
||||
|
||||
#### Timeout
|
||||
|
||||
Configure the timeout specifically for CloudWatch Logs queries.
|
||||
|
||||
Log queries don't keep a single request open, and instead periodically poll for results.
|
||||
Therefore, they don't recognize the standard Grafana query timeout.
|
||||
Because of limits on concurrently running queries in CloudWatch, they can also take longer to finish.
|
||||
|
||||
#### X-Ray trace links
|
||||
|
||||
To automatically add links in your logs when the log contains the `@xrayTraceId` field, link an X-Ray data source in the "X-Ray trace link" section of the data source configuration.
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/xray-trace-link-configuration-8-2.png" max-width="800px" class="docs-image--no-shadow" caption="Trace link configuration" >}}
|
||||
|
||||
The data source select contains only existing data source instances of type X-Ray.
|
||||
To use this feature, you must already have an X-Ray data source configured.
|
||||
For details, see the [X-Ray data source docs](/grafana/plugins/grafana-x-ray-datasource/).
|
||||
|
||||
To view the X-Ray link, select the log row in either the Explore view or dashboard [Logs panel](ref:logs) to view the log details section.
|
||||
|
||||
To log the `@xrayTraceId`, see the [AWS X-Ray documentation](https://docs.amazonaws.cn/en_us/xray/latest/devguide/xray-services.html).
|
||||
|
||||
To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/xray-link-log-details-8-2.png" max-width="800px" class="docs-image--no-shadow" caption="Trace link in log details" >}}
|
||||
|
||||
### Configure the data source with grafana.ini
|
||||
|
||||
The Grafana [configuration file](ref:configure-grafana-aws) includes an `AWS` section where you can configure data source options:
|
||||
|
||||
| Configuration option | Description |
|
||||
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `allowed_auth_providers` | Specifies which authentication providers are allowed for the CloudWatch data source. The following providers are enabled by default in open-source Grafana: `default` (AWS SDK default), keys (Access and secret key), credentials (Credentials file), ec2_IAM_role (EC2 IAM role). |
|
||||
| `assume_role_enabled` | Allows you to disable `assume role (ARN)` in the CloudWatch data source. The assume role (ARN) is enabled by default in open-source Grafana. |
|
||||
| `list_metrics_page_limit` | Sets the limit of List Metrics API pages. When a custom namespace is specified in the query editor, the [List Metrics API](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html) populates the _Metrics_ field and _Dimension_ fields. The API is paginated and returns up to 500 results per page, and the data source also limits the number of pages to 500 by default. This setting customizes that limit. |
|
||||
|
||||
### Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
#### Provisioning examples
|
||||
|
||||
##### Using AWS SDK (default)
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: default
|
||||
defaultRegion: eu-west-2
|
||||
```
|
||||
|
||||
##### Using credentials' profile name (non-default)
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: credentials
|
||||
defaultRegion: eu-west-2
|
||||
customMetricsNamespaces: 'CWAgent,CustomNameSpace'
|
||||
profile: secondary
|
||||
```
|
||||
|
||||
##### Using accessKey and secretKey
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: keys
|
||||
defaultRegion: eu-west-2
|
||||
secureJsonData:
|
||||
accessKey: '<your access key>'
|
||||
secretKey: '<your secret key>'
|
||||
```
|
||||
|
||||
##### Using AWS SDK Default and ARN of IAM Role to Assume
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: default
|
||||
assumeRoleArn: arn:aws:iam::123456789012:root
|
||||
defaultRegion: eu-west-2
|
||||
```
|
||||
|
||||
## Query the data source
|
||||
|
||||
The CloudWatch data source can query data from both CloudWatch metrics and CloudWatch Logs APIs, each with its own specialized query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Query caching
|
||||
|
||||
When you enable [query and resource caching](/docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching), Grafana temporarily stores the results of data source queries and resource requests. Query caching is available in CloudWatch Metrics in Grafana Cloud and Grafana Enterprise. It is not available in CloudWatch Logs Insights due to how query results are polled from AWS.
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
- [Configure the CloudWatch data source](ref:configure-cloudwatch)
|
||||
- [CloudWatch query editor](ref:cloudwatch-query-editor)
|
||||
- [Templates and variables](ref:cloudwatch-template-variables)
|
||||
- [Configure AWS authentication](ref:cloudwatch-aws-authentication)
|
||||
|
||||
## Import pre-configured dashboards
|
||||
|
||||
The CloudWatch data source ships with curated and pre-configured dashboards for five of the most popular AWS services:
|
||||
The CloudWatch data source includes curated, pre-configured dashboards for five popular AWS services:
|
||||
|
||||
- **Amazon Elastic Compute Cloud:** `Amazon EC2`
|
||||
- **Amazon Elastic Block Store:** `Amazon EBS`
|
||||
|
@ -378,40 +130,29 @@ The CloudWatch data source ships with curated and pre-configured dashboards for
|
|||
- **Amazon CloudWatch Logs:** `Amazon CloudWatch Logs`
|
||||
- **Amazon Relational Database Service:** `Amazon RDS`
|
||||
|
||||
**To import curated dashboards:**
|
||||
To import curated dashboards:
|
||||
|
||||
1. Navigate to the data source's [configuration page](#configure-the-data-source).
|
||||
1. Select the **Dashboards** tab.
|
||||
1. Navigate to the data source's configuration page.
|
||||
1. Click the **Dashboards** tab.
|
||||
|
||||
This displays the curated selection of importable dashboards.
|
||||
|
||||
1. Select **Import** for the dashboard to import.
|
||||
1. Click **Import** for each dashboard you want to import.
|
||||
|
||||
{{< figure src="/static/img/docs/v65/cloudwatch-dashboard-import.png" caption="CloudWatch dashboard import" >}}
|
||||
 CloudWatch pre-configured dashboards
|
||||
|
||||
**To customize an imported dashboard:**
|
||||
To customize one of these dashboards, Grafana recommends saving it under a different name; otherwise, Grafana upgrades will overwrite your customizations with the new version.
|
||||
|
||||
To customize one of these dashboards, we recommend that you save it under a different name.
|
||||
If you don't, upgrading Grafana can overwrite the customized dashboard with the new version.
|
||||
## Get the most out of the data source
|
||||
|
||||
## Create queries for alerting
|
||||
After installing and configuring the Amazon CloudWatch data source, you can:
|
||||
|
||||
Alerting requires queries that return numeric data, which CloudWatch Logs support.
|
||||
For example, you can enable alerts through the use of the `stats` command.
|
||||
|
||||
This is also a valid query for alerting on messages that include the text "Exception":
|
||||
|
||||
```
|
||||
filter @message like /Exception/
|
||||
| stats count(*) as exceptionCount by bin(1h)
|
||||
| sort exceptionCount desc
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you receive an error like `input data must be a wide series but got ...` when trying to alert on a query, make sure that your query returns valid numeric data that can be output to a Time series panel.
|
||||
{{< /admonition >}}
|
||||
|
||||
For more information on Grafana alerts, refer to [Alerting](ref:alerting).
|
||||
- Create a wide variety of [visualizations](ref:visualizations)
|
||||
- Configure and use [templates and variables](ref:variables)
|
||||
- Add [transformations](ref:transformations)
|
||||
- Add [annotations](ref:annotate-visualizations)
|
||||
- Set up [alerting](ref:alerting)
|
||||
- Optimize performance with [query caching](ref:query-caching)
|
||||
|
||||
## Control pricing
|
||||
|
||||
|
@ -421,8 +162,9 @@ Each time you select a dimension in the query editor, Grafana issues a `ListMetr
|
|||
Each time you change queries in the query editor, Grafana issues a new request to the `GetMetricData` API.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Grafana replaced all `GetMetricStatistics` API requests with calls to GetMetricData to provide better support for CloudWatch metric math, and enables the automatic generation of search expressions when using wildcards or disabling the `Match Exact` option.
|
||||
The `GetMetricStatistics` API qualified for the CloudWatch API free tier, but `GetMetricData` calls don't.
|
||||
Grafana now uses the `GetMetricData` API instead of `GetMetricStatistics` for CloudWatch queries. This change improves support for CloudWatch metric math and allows Grafana to automatically generate search expressions when you use wildcards or disable the `Match Exact` option.
|
||||
|
||||
Unlike `GetMetricStatistics` requests, `GetMetricData` requests do not qualify for the CloudWatch API free tier.
|
||||
{{< /admonition >}}
|
||||
|
||||
For more information, refer to the [CloudWatch pricing page](https://aws.amazon.com/cloudwatch/pricing/).
|
||||
|
@ -433,7 +175,7 @@ AWS defines quotas, or limits, for resources, actions, and items in your AWS acc
|
|||
Depending on the number of queries in your dashboard and the number of users accessing the dashboard, you might reach the usage limits for various CloudWatch and CloudWatch Logs resources.
|
||||
Quotas are defined per account and per region.
|
||||
|
||||
If you use multiple regions or configured more than one CloudWatch data source to query against multiple accounts, you must request a quota increase for each account and region in which you reach the limit.
|
||||
If you use multiple regions or have configured more than one CloudWatch data source to query against multiple accounts, you must request a quota increase for each account and region in which you reach the limit.
|
||||
|
||||
To request a quota increase, visit the [AWS Service Quotas console](https://console.aws.amazon.com/servicequotas/home?r#!/services/monitoring/quotas/L-5E141212).
|
||||
For more information, refer to the AWS documentation for [Service Quotas](https://docs.aws.amazon.com/servicequotas/latest/userguide/intro.html) and [CloudWatch limits](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html).
|
||||
|
@ -442,8 +184,4 @@ For more information, refer to the AWS documentation for [Service Quotas](https:
|
|||
|
||||
The CloudWatch plugin enables you to monitor and troubleshoot applications across multiple regional accounts. Using cross-account observability, you can seamlessly search, visualize and analyze metrics and logs without worrying about account boundaries.
|
||||
|
||||
To use this feature, configure in the [AWS console under CloudWatch Settings](https://aws.amazon.com/blogs/aws/new-amazon-cloudwatch-cross-account-observability/), a monitoring and source account, and then add the necessary IAM permissions as described above.
|
||||
|
||||
## CloudWatch Logs data protection
|
||||
|
||||
CloudWatch Logs can safeguard data by using log group data protection policies. If you have data protection enabled for a log group, then any sensitive data that matches the data identifiers you've selected will be masked. In order to view masked data you will need to have the `logs:Unmask` IAM permission enabled. See the AWS documentation on how to [help protect sensitive log data with masking](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html) to learn more about this.
|
||||
To use this feature, configure a monitoring and source account in the [AWS console under CloudWatch Settings](https://aws.amazon.com/blogs/aws/new-amazon-cloudwatch-cross-account-observability/), and then add the necessary IAM permissions as described above.
|
||||
|
|
|
@ -15,7 +15,7 @@ labels:
|
|||
- oss
|
||||
menuTitle: AWS authentication
|
||||
title: Configure AWS authentication
|
||||
weight: 200
|
||||
weight: 400
|
||||
refs:
|
||||
configure-grafana-assume-role-enabled:
|
||||
- pattern: /docs/grafana/
|
||||
|
@ -31,13 +31,13 @@ refs:
|
|||
|
||||
# Configure AWS authentication
|
||||
|
||||
A Grafana data source plugin's requests to AWS are made on behalf of an AWS Identity and Access Management (IAM) [role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) or IAM [user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html).
|
||||
The IAM user or IAM role must have the associated policies to perform certain API actions to query the data in the data source.
|
||||
Since these policies are specific to each data source, refer to each data source plugin's documentation for details.
|
||||
Grafana data source plugins make requests to AWS on behalf of an AWS Identity and Access Management (IAM) [role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) or IAM [user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html).
|
||||
The IAM user or IAM role must have the associated policies to perform certain API actions for querying data source data.
|
||||
Since these policies are specific to each data source, refer to individual data source plugin documentation for details.
|
||||
|
||||
All requests to AWS APIs are performed on the server side by the Grafana backend using the official [AWS SDK](https://github.com/aws/aws-sdk-go).
|
||||
The Grafana backend performs all AWS API requests server-side using the official [AWS SDK](https://github.com/aws/aws-sdk-go).
|
||||
|
||||
This topic has the following sections:
|
||||
This document explores the following topics:
|
||||
|
||||
- [Select an authentication method](#select-an-authentication-method)
|
||||
- [Assume a role](#assume-a-role)
|
||||
|
@ -47,43 +47,36 @@ This topic has the following sections:
|
|||
|
||||
## Select an authentication method
|
||||
|
||||
Depending on your configuration and the environment your instance of Grafana is running in, you'll have different authentication methods to select from.
|
||||
Available authentication methods depend on your configuration and the environment where Grafana runs.
|
||||
|
||||
Open source Grafana enables the `AWS SDK Default`, `Credentials file`, and `Access and secret key` methods by default. Cloud Grafana enables `Access and secret key` by default. If necessary, you can enable or disable particular auth providers if you have server configuration access. For more information, refer to the [`allowed_auth_providers` documentation](ref:configure-grafana-allowed-auth-providers).
|
||||
Open source Grafana enables the `AWS SDK Default`, `Credentials file`, and `Access and secret key` methods by default. Cloud Grafana enables only `Access and secret key` by default. Users with server configuration access can enable or disable specific auth providers as needed. For more information, refer to the [`allowed_auth_providers` documentation](ref:configure-grafana-allowed-auth-providers).
|
||||
|
||||
- `AWS SDK Default` performs no custom configuration and instead uses the [default provider](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) as specified by the [AWS SDK for Go](https://github.com/aws/aws-sdk-go).
|
||||
It requires you to configure your AWS credentials outside of grafana, such as with [the CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html), or by [attaching credentials directly to an EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), [in an ECS task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html), or for a [Service Account in a Kubernetes cluster](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). With `AWS SDK Default` you can attach permissions directly to the data source or you can use it combination with the optional [`Assume Role ARN`](#assume-a-role) field.
|
||||
- `Credentials file` corresponds directly to the [SharedCredentialsProvider](https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/#SharedCredentialsProvider) provider in the [AWS SDK for Go](https://github.com/aws/aws-sdk-go).
|
||||
It reads the AWS shared credentials file to find a given profile.
|
||||
While `AWS SDK Default` will also find the shared credentials file, this option allows you to specify which profile to use without using environment variables.
|
||||
This option doesn't have any implicit fallbacks to other credential providers, and it fails if the credentials provided from the file aren't correct.
|
||||
- `Access and secret key` corresponds to the [StaticProvider](https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/#StaticProvider) and uses the given access key ID and secret key to authenticate.
|
||||
This method doesn't have any fallbacks, and will fail if the provided key pair doesn't work. This is the primary authentication method for Grafana Cloud.
|
||||
- `Grafana Assume Role` - With this auth provider option, Grafana Cloud users create an AWS IAM role that has a trust relationship with Grafana's AWS account. Grafana then uses [STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) to generate temporary credentials on its behalf. Users with this option enabled no longer need to generate secret and access keys for users. Refer to [Use Grafana Assume Role](/docs/grafana/latest/datasources/aws-cloudwatch/aws-authentication/#use-grafana-assume-role) for further detail.
|
||||
- `AWS SDK Default` uses the [default provider](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) from the [AWS SDK for Go](https://github.com/aws/aws-sdk-go) without custom configuration.
|
||||
This method requires configuring AWS credentials outside Grafana through [the CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html), or by [attaching credentials directly to an EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), [in an ECS task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html), or for a [Service Account in a Kubernetes cluster](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). You can attach permissions directly to the data source with AWS SDK Default or combine it with the optional [`Assume Role ARN`](#assume-a-role) field.
|
||||
- `Credentials file` maps to the [SharedCredentialsProvider](https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/#SharedCredentialsProvider) provider in the [AWS SDK for Go](https://github.com/aws/aws-sdk-go).
|
||||
This method reads the AWS shared credentials file for a specified profile.
|
||||
Unlike `AWS SDK Default` which also reads the shared credentials file, this option lets you specify a profile directly without environment variables.
|
||||
This option provides no fallback to other credential providers and fails if the file credentials are invalid.
|
||||
- `Access and secret key` corresponds to the [StaticProvider](https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/#StaticProvider) and authenticates using a specified access key ID and secret key pair.
|
||||
This method doesn't provide fallback authentication and fails if the key pair is invalid. Grafana Cloud uses this as the primary authentication method.
|
||||
- `Grafana Assume Role` - With this authentication method, Grafana Cloud users create an AWS IAM role that has a trust relationship with Grafana's AWS account. Grafana uses [STS](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) to generate temporary credentials on its behalf. This method eliminates the need to generate secret and access keys for users. Refer to [Use Grafana Assume Role](/docs/grafana/latest/datasources/aws-cloudwatch/aws-authentication/#use-grafana-assume-role) for more information.
|
||||
- `Workspace IAM role` corresponds to the [EC2RoleProvider](https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/ec2rolecreds/#EC2RoleProvider).
|
||||
The EC2RoleProvider pulls credentials for a role attached to the EC2 instance that Grafana runs on.
|
||||
You can also achieve this by using the authentication method AWS SDK Default, but this option is different as it doesn't have any fallbacks.
|
||||
This option is enabled by default only in Amazon Managed Grafana.
|
||||
The EC2RoleProvider retrieves credentials from a role attached to the EC2 instance running Grafana.
|
||||
While AWS SDK Default can achieve similar results, this option provides no fallback authentication. Amazon Managed Grafana enables this option by default.
|
||||
|
||||
## Assume a role
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Assume a role is required for the Grafana Assume Role.
|
||||
{{< /admonition >}}
|
||||
Specify an IAM role to assume in the **Assume Role ARN** field.
|
||||
|
||||
You can specify an IAM role to assume in the **Assume Role ARN** field.
|
||||
When you configure **Assume Role ARN**, Grafana uses the provided credentials to perform an [sts:AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) call. The primary authentication method only needs permission to assume the role, while the assumed role requires CloudWatch access permissions.
|
||||
|
||||
When this field is filled in, Grafana uses the provided credentials to perform an [sts:AssumeRole](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) call. In this scenario, the primary authentication method does not need permission to access CloudWatch directly; it just needs the ability to assume a role, while the role it assumes should have permission to access CloudWatch.
|
||||
For example, you might use one set of long-term credentials across all AWS data sources but want to limit each data source's AWS access (such as separating staging and production data access). You could create separate credentials for each data source with distinct permissions, but this approach requires managing and rotating multiple secret and access keys across many data source instances.
|
||||
|
||||
For example, you may have one set of long term credentials for all of your AWS data sources. However, you want to limit the access each data source has to AWS (maybe one accesses staging data and another production data, for example). You could create separate credentials for each data source, each maintaining its own set of permissions to various resources. However, depending on how many data sources instances you have and how you've set them up, that might mean rotating and managing many different secret and access keys.
|
||||
|
||||
Instead, using the assume role functionality, you could have one set of AWS credentials for all of your AWS data sources that has only one permission—the permission to assume a role with STS. Then you could create a separate IAM role for each data source that specifies which permissions that data source can temporarily assume. Since IAM roles are not credentials, there's no need to rotate them and they may be easier to manage.
|
||||
|
||||
The Grafana Assume Role also helps facilitate this. Using this role, Grafana's AWS account acts as the primary credential, having only the permission to assume roles in other accounts. You can then create IAM roles for Grafana's account to assume. For more information, refer to [Use Grafana assume role](#use-grafana-assume-role).
|
||||
Instead, assume role functionality lets you use one set of AWS credentials across all AWS data sources with a single permission: the ability to assume roles through STS. You then create separate IAM roles for each data source that specify temporary permissions. Since IAM roles are not credentials, they require no rotation and simplify management.
|
||||
|
||||
If the **Assume Role ARN** field is left empty, Grafana uses the provided credentials from the selected authentication method directly, and permissions to AWS data must be attached directly to those credentials. The **Assume Role ARN** field is optional for all authentication methods except for Grafana Assume Role.
|
||||
|
||||
To disable this feature in open source Grafana or Grafana Enterprise, refer to the [`assume_role_enabled` documentation](ref:configure-grafana-assume-role-enabled).
|
||||
To disable this feature in open source Grafana or Grafana Enterprise, refer to [`assume_role_enabled`](ref:configure-grafana-assume-role-enabled).
|
||||
|
||||
### Use an external ID
|
||||
|
||||
|
@ -91,48 +84,25 @@ To disable this feature in open source Grafana or Grafana Enterprise, refer to t
|
|||
You cannot use an external ID for the Grafana Assume Role authentication provider.
|
||||
{{< /admonition >}}
|
||||
|
||||
To assume a role in another account that was created with an external ID, specify the external ID in the **External ID** field.
|
||||
To assume a role in another account created with an external ID, specify the external ID in the **External ID** field.
|
||||
|
||||
For more information, refer to the [AWS documentation on external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html).
|
||||
|
||||
When Grafana Assume Role is the selected authentication provider, Grafana is responsible for supplying and calling the external ID. It's displayed on the data source configuration page and is unique to your account. It's very important when creating an IAM role for `Grafana Assume Role` that you set a conditional that Grafana's AWS account can only assume your IAM role when a particular external ID is specified:
|
||||
|
||||
```
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"AWS": {Grafana's AWS Account}
|
||||
},
|
||||
"Action": "sts:AssumeRole",
|
||||
"Condition": {
|
||||
"StringEquals": {
|
||||
"sts:ExternalId": {External ID unique to your account}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Use a custom endpoint
|
||||
|
||||
You can specify a custom endpoint URL in the **Endpoint** field, which overrides the default generated endpoint for the AWS service API.
|
||||
Leave this field blank to use the default generated endpoint.
|
||||
Use the **Endpoint** field to specify a custom endpoint URL that overrides the default AWS service API endpoint. Leave this field blank to use the default generated endpoint.
|
||||
|
||||
For more information on why and how to use service endpoints, refer to the [AWS service endpoints documentation](https://docs.aws.amazon.com/general/latest/gr/rande.html).
|
||||
For more information about using service endpoints, refer to the [AWS service endpoints documentation](https://docs.aws.amazon.com/general/latest/gr/rande.html).
|
||||
|
||||
## Use an AWS credentials file
|
||||
|
||||
Create a file at `~/.aws/credentials`, the `HOME` path for the user running the `grafana-server` service.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you think you have the credentials file in the right location, but it's not working, try moving your `.aws` file to `/usr/share/grafana/` and grant your credentials file at most 0644 permissions.
|
||||
If the credentials file appears to be in the correct location but isn't working, move your `.aws` file to `/usr/share/grafana/` and set the credentials file permissions to `0644`.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Credentials file example
|
||||
**Credentials file example:**
|
||||
|
||||
```bash
|
||||
[default]
|
||||
|
@ -143,12 +113,14 @@ region = us-west-2
|
|||
|
||||
## Use EKS IAM roles for service accounts
|
||||
|
||||
The Grafana process in the container runs as user 472 (called "grafana").
|
||||
When Kubernetes mounts your projected credentials, they're available by default to only the root user.
|
||||
EKS IAM roles for service accounts (IRSA) are an AWS EKS feature that allows pods to assume IAM roles without storing long-term credentials. When you configure IRSA in your EKS cluster, AWS injects temporary credentials into your pod as projected volume mounts.
|
||||
|
||||
To grant user 472 permission to access the credentials, and avoid falling back to the IAM role attached to the EC2 instance, you must provide a [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for your pod.
|
||||
In Grafana containers, the process runs as user `472` ("grafana").
|
||||
By default, Kubernetes mounts the projected credentials with permissions for the root user only.
|
||||
|
||||
### Security context example
|
||||
To grant user `472` permission to access the credentials, and prevent fallback to the IAM role attached to the EC2 instance, set a [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for your pod.
|
||||
|
||||
**Security context example:**
|
||||
|
||||
```yaml
|
||||
securityContext:
|
||||
|
@ -160,12 +132,14 @@ securityContext:
|
|||
## Use Grafana Assume Role
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Grafana Assume Role is only available in Grafana Cloud.
|
||||
|
||||
It's currently only available for Amazon CloudWatch and Athena.
|
||||
Grafana Assume Role is only available in Grafana Cloud for Amazon CloudWatch and Athena data sources.
|
||||
{{< /admonition >}}
|
||||
|
||||
The Grafana Assume Role authentication provider lets you authenticate with AWS without having to create and maintain long term AWS users or rotate their access and secret keys. Instead, you can create an IAM role that has permissions to access CloudWatch and a trust relationship with Grafana's AWS account. Grafana's AWS account then makes an STS request to AWS to create temporary credentials to access your AWS data. It makes this STS request by passing along an `externalID` that's unique per Cloud account, to ensure that Grafana Cloud users can only access their own AWS data. For more information, refer to the [AWS documentation on external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html).
|
||||
The Grafana Assume Role authentication provider lets you access AWS without creating or managing long-term AWS IAM users or rotating access keys. Instead, you can create an IAM role that has permissions to access CloudWatch and trusts a Grafana AWS account.
|
||||
|
||||
The Grafana AWS account then makes a Security Token Service (STS) request to generate temporary credentials for your AWS data. This request includes an `externalID` unique to your Grafana Cloud account, which ensures users can access only their own AWS resources.
|
||||
|
||||
For more information, refer to the [AWS documentation on external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html).
|
||||
|
||||
To use the Grafana Assume Role:
|
||||
|
||||
|
@ -177,7 +151,7 @@ To use the Grafana Assume Role:
|
|||
6. Give the role a name and description, and click **Create role**.
|
||||
7. Copy the ARN of the role you just created and paste it into the **Assume Role ARN** field on the **Settings** tab of CloudWatch data source configuration in Grafana.
|
||||
|
||||
Sample Trust Relationship for an IAM role:
|
||||
**Sample Trust Relationship for an IAM role:**
|
||||
|
||||
```
|
||||
{
|
||||
|
|
|
@ -0,0 +1,401 @@
|
|||
---
|
||||
aliases:
|
||||
- ../data-sources/aws-cloudwatch/
|
||||
- ../data-sources/aws-cloudwatch/preconfig-cloudwatch-dashboards/
|
||||
- ../data-sources/aws-cloudwatch/provision-cloudwatch/
|
||||
- cloudwatch/
|
||||
- preconfig-cloudwatch-dashboards/
|
||||
- provision-cloudwatch/
|
||||
description: This document provides configuration instructions for the CloudWatch data source.
|
||||
keywords:
|
||||
- grafana
|
||||
- CloudWatch
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure CloudWatch
|
||||
weight: 100
|
||||
refs:
|
||||
logs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
configure-grafana-aws:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
CloudWatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
---
|
||||
|
||||
# Configure the Amazon CloudWatch data source
|
||||
|
||||
This document provides instructions for configuring the Amazon CloudWatch data source and explains available configuration options. For general information on adding and managing data sources, refer to [Data source management](ref:data-source-management).
|
||||
|
||||
## Before you begin
|
||||
|
||||
- You must have the `Organization administrator` role to configure the CloudWatch data source. Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system.
|
||||
|
||||
- Grafana comes with a built-in CloudWatch data source plugin, so you do not need to install a plugin.
|
||||
|
||||
- Familiarize yourself with your CloudWatch security configuration and gather any necessary security certificates, client certificates, and client keys.
|
||||
|
||||
## Add the CloudWatch data source
|
||||
|
||||
Complete the following steps to set up a new CloudWatch data source:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Click **Add new connection**
|
||||
1. Type `CloudWatch` in the search bar.
|
||||
1. Select the **CloudWatch data source**.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
Grafana takes you to the **Settings** tab, where you will set up your CloudWatch configuration.
|
||||
|
||||
## Configure the data source in the UI
|
||||
|
||||
The following are configuration options for the CloudWatch data source.
|
||||
|
||||
| **Setting** | **Description** |
|
||||
| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Name** | The data source name. Sets the name you use to refer to the data source in panels and queries. |
|
||||
| **Default** | Toggle to select as the default name in dashboard panels. When you go to a dashboard panel, this will be the default selected data source. |
|
||||
|
||||
Grafana plugin requests to AWS are made on behalf of an AWS Identity and Access Management (IAM) role or IAM user.
|
||||
The IAM user or IAM role must have the associated policies to perform certain API actions.
|
||||
|
||||
For authentication options and configuration details, refer to [AWS authentication](aws-authentication/).
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Specify which AWS credentials chain to use. A Grafana plugin's requests to AWS are made on behalf of an IAM role or IAM user. The IAM user or IAM role must have the necessary policies to perform the required API actions. |
|
||||
|
||||
**Access & secret key:**
|
||||
|
||||
You must use both an access key ID and a secret access key to authenticate.
|
||||
|
||||
| Setting | Description |
|
||||
| --------------------- | ---------------------------- |
|
||||
| **Access Key ID** | Enter your key ID. |
|
||||
| **Secret Access Key** | Enter the secret access key. |
|
||||
|
||||
**Assume Role**:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Assume Role ARN** | _Optional._ Specify the ARN of an IAM role to assume. This ensures the selected authentication method is used to assume the role, not used directly. |
|
||||
| **External ID** | If you're assuming a role in another AWS account that requires an external ID, specify it here. |
|
||||
|
||||
**Additional Settings:**
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Endpoint** | _Optional_. Specify a custom endpoint for the AWS service. |
|
||||
| **Default Region** | Specify the AWS region. Example: If the region is US West (Oregon), use `us-west-2`. |
|
||||
| **Namespaces of Custom Metrics** | Add one or more custom metric namespaces, separated by commas (for example, `Namespace1,Namespace2`). Grafana can't automatically load custom namespaces using the [CloudWatch GetMetricData API](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html). To make custom metrics available in the query editor, manually specify the namespaces in the `Namespaces of Custom Metrics` field in the data source configuration. |
|
||||
|
||||
**CloudWatch Logs**:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Query timeout result** | Grafana polls CloudWatch Logs every second until AWS returns a `Done` status or the timeout is reached. An error is returned if the timeout is exceeded. For alerting, the timeout defined in the Grafana configuration file takes precedence. Enter a valid duration string, such as `30m`, `30s` or `200ms`. The default is `30m`. |
|
||||
| **Default Log Groups** | _Optional_. Specify the default log groups for CloudWatch Logs queries. |
|
||||
|
||||
**X-Ray trace link:**
|
||||
|
||||
| Setting | Description |
|
||||
| --------------- | ----------------------------------------------------- |
|
||||
| **Data source** | Select the X-Ray data source from the drop-down menu. |
|
||||
|
||||
Grafana automatically creates a link to a trace in X-Ray data source if logs contain the `@xrayTraceId` field. To use this feature, you must already have an X-Ray data source configured. For details, see the [X-Ray data source docs](/grafana/plugins/grafana-x-ray-datasource/). To view the X-Ray link, select the log row in either the Explore view or dashboard [Logs panel](ref:logs) to view the log details section.
|
||||
|
||||
To log the `@xrayTraceId`, refer to the [AWS X-Ray documentation](https://docs.amazonaws.cn/en_us/xray/latest/devguide/xray-services.html). To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
|
||||
**Private data source connect** - _Only for Grafana Cloud users._
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Private data source connect** | Establishes a private, secured connection between a Grafana Cloud stack and data sources within a private network. Use the drop-down to locate the PDC URL. For setup instructions, refer to [Private data source connect (PDC)](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc). Click **Manage private data source connect** to open your PDC connection page and view your configuration details. |
|
||||
|
||||
After configuring your Amazon CloudWatch data source options, click **Save & test** at the bottom to test the connection. You should see a confirmation dialog box that says:
|
||||
|
||||
{{< figure src="/media/docs/cloudwatch/cloudwatch-config-success-message.png" >}}
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To troubleshoot issues while setting up the CloudWatch data source, check the `/var/log/grafana/grafana.log` file. Common issues include invalid credentials, missing regions and metrics-only credentials.
|
||||
{{< /admonition >}}
|
||||
|
||||
### IAM policy examples
|
||||
|
||||
To read CloudWatch metrics and EC2 tags, instances, regions, and alarms, you must grant Grafana permissions via IAM.
|
||||
You can attach these permissions to the IAM role or IAM user you configured in [AWS authentication](aws-authentication/).
|
||||
|
||||
**Metrics-only permissions:**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "AllowReadingMetricsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"cloudwatch:DescribeAlarmsForMetric",
|
||||
"cloudwatch:DescribeAlarmHistory",
|
||||
"cloudwatch:DescribeAlarms",
|
||||
"cloudwatch:ListMetrics",
|
||||
"cloudwatch:GetMetricData",
|
||||
"cloudwatch:GetInsightRuleReport"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingTagsInstancesRegionsFromEC2",
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:DescribeTags", "ec2:DescribeInstances", "ec2:DescribeRegions"],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourcesForTags",
|
||||
"Effect": "Allow",
|
||||
"Action": "tag:GetResources",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourceMetricsFromPerformanceInsights",
|
||||
"Effect": "Allow",
|
||||
"Action": "pi:GetResourceMetrics",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Logs-only permissions:**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "AllowReadingLogsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"logs:DescribeLogGroups",
|
||||
"logs:GetLogGroupFields",
|
||||
"logs:StartQuery",
|
||||
"logs:StopQuery",
|
||||
"logs:GetQueryResults",
|
||||
"logs:GetLogEvents"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingTagsInstancesRegionsFromEC2",
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:DescribeTags", "ec2:DescribeInstances", "ec2:DescribeRegions"],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourcesForTags",
|
||||
"Effect": "Allow",
|
||||
"Action": "tag:GetResources",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Metrics and logs permissions:**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "AllowReadingMetricsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"cloudwatch:DescribeAlarmsForMetric",
|
||||
"cloudwatch:DescribeAlarmHistory",
|
||||
"cloudwatch:DescribeAlarms",
|
||||
"cloudwatch:ListMetrics",
|
||||
"cloudwatch:GetMetricData",
|
||||
"cloudwatch:GetInsightRuleReport"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourceMetricsFromPerformanceInsights",
|
||||
"Effect": "Allow",
|
||||
"Action": "pi:GetResourceMetrics",
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingLogsFromCloudWatch",
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"logs:DescribeLogGroups",
|
||||
"logs:GetLogGroupFields",
|
||||
"logs:StartQuery",
|
||||
"logs:StopQuery",
|
||||
"logs:GetQueryResults",
|
||||
"logs:GetLogEvents"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingTagsInstancesRegionsFromEC2",
|
||||
"Effect": "Allow",
|
||||
"Action": ["ec2:DescribeTags", "ec2:DescribeInstances", "ec2:DescribeRegions"],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Sid": "AllowReadingResourcesForTags",
|
||||
"Effect": "Allow",
|
||||
"Action": "tag:GetResources",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
#### Cross-account observability permissions
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": ["oam:ListSinks", "oam:ListAttachedLinks"],
|
||||
"Effect": "Allow",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Cross-account observability lets you retrieve metrics and logs across different accounts in a single region, but you can't query EC2 Instance Attributes across accounts because those come from the EC2 API and not the CloudWatch API.
|
||||
{{< /admonition >}}
|
||||
|
||||
For more information on configuring authentication, refer to [Configure AWS authentication](ref:CloudWatch-aws-authentication).
|
||||
|
||||
### CloudWatch Logs data protection
|
||||
|
||||
CloudWatch Logs can protect data by applying log group data protection policies. When data protection is enabled for a log group, any sensitive data that matches the identifiers you select is automatically masked. To view masked data, your IAM role or user must have the `logs:Unmask` permission. For more details, refer to [the AWS guide](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data.html) on masking sensitive log data.
|
||||
|
||||
### Configure the data source with grafana.ini
|
||||
|
||||
The Grafana [configuration file](ref:configure-grafana-aws) includes an `AWS` section where you can configure data source options:
|
||||
|
||||
| Configuration option | Description |
|
||||
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `allowed_auth_providers` | Specifies which authentication providers are allowed for the CloudWatch data source. The following providers are enabled by default in open source Grafana: `default` (AWS SDK default), `keys` (Access and secret key), `credentials` (Credentials file), `ec2_iam_role` (EC2 IAM role). |
|
||||
| `assume_role_enabled` | Allows you to disable `assume role (ARN)` in the CloudWatch data source. The assume role (ARN) is enabled by default in open source Grafana. |
|
||||
| `list_metrics_page_limit` | Sets the limit of List Metrics API pages. When a custom namespace is specified in the query editor, the [List Metrics API](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html) populates the _Metrics_ field and _Dimension_ fields. The API is paginated and returns up to 500 results per page, and the data source also limits the number of pages to 500 by default. This setting customizes that limit. |
|
||||
|
||||
### Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning and available configuration options, refer to [Provision Grafana](ref:provisioning-data-sources).
|
||||
|
||||
**Using AWS SDK (default)**:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: default
|
||||
defaultRegion: eu-west-2
|
||||
```
|
||||
|
||||
**Using credentials' profile name (non-default)**:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: credentials
|
||||
defaultRegion: eu-west-2
|
||||
customMetricsNamespaces: 'CWAgent,CustomNameSpace'
|
||||
profile: secondary
|
||||
```
|
||||
|
||||
**Using `accessKey` and `secretKey`**:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: keys
|
||||
defaultRegion: eu-west-2
|
||||
secureJsonData:
|
||||
accessKey: '<your access key>'
|
||||
secretKey: '<your secret key>'
|
||||
```
|
||||
|
||||
**Using AWS SDK Default and ARN of IAM Role to Assume:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: CloudWatch
|
||||
type: cloudwatch
|
||||
jsonData:
|
||||
authType: default
|
||||
assumeRoleArn: arn:aws:iam::123456789012:root
|
||||
defaultRegion: eu-west-2
|
||||
```
|
|
@ -17,7 +17,7 @@ labels:
|
|||
- oss
|
||||
menuTitle: Query editor
|
||||
title: Amazon CloudWatch query editor
|
||||
weight: 300
|
||||
weight: 200
|
||||
refs:
|
||||
query-transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
|
@ -34,12 +34,26 @@ refs:
|
|||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
add-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
---
|
||||
|
||||
# Amazon CloudWatch query editor
|
||||
|
||||
This topic explains querying specific to the CloudWatch data source.
|
||||
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
|
||||
Grafana provides a query editor for the CloudWatch data source, which allows you to query, visualize, and alert on logs and metrics stored in Amazon CloudWatch. It is located on the [Explore](ref:explore) page. For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
|
||||
## Choose a query editing mode
|
||||
|
||||
|
@ -48,49 +62,100 @@ The CloudWatch data source can query data from both CloudWatch metrics and Cloud
|
|||
- [CloudWatch Metrics](#query-cloudwatch-metrics)
|
||||
- [CloudWatch Logs](#query-cloudwatch-logs)
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/cloudwatch-query-editor-api-modes-8.3.0.png" max-width="500px" class="docs-image--right" caption="CloudWatch API modes" >}}
|
||||
Select the API to query using the drop-down to the right of the **Region** setting.
|
||||
|
||||
Select which API to query by using the query mode switch on top of the editor.
|
||||
## CloudWatch Metrics query editor components
|
||||
|
||||
The following are the components of the CloudWatch query editor.
|
||||
|
||||
| **Setting** | **Description** |
|
||||
| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Region** | Select an AWS region if it differs from the default. |
|
||||
| **Namespace** | The AWS service namespace. Examples: `AWS/EC2`, `AWS/Lambda`. |
|
||||
| **Metric name** | The name of the metric you want to visualize. Example: `CPUUtilization`. |
|
||||
| **Statistic** | Choose how to aggregate your data. Examples: `sum`, `average`, `maximum`. |
|
||||
| **Dimensions** | Select dimensions from the drop-down. Examples: `InstanceId`, `FunctionName`, `latency`. You can add several dimensions to your query. |
|
||||
| **Match exact** | _Optional_. When enabled, this option restricts query results to metrics that precisely match the specified dimensions and their values. All dimensions of the queried metric must be explicitly defined in the query to ensure an exact schema match. If disabled, the query will also return metrics that match the defined schema but possess additional dimensions. |
|
||||
| **ID** | _Optional_. Unique identifier required by the GetMetricData API for referencing queries in math expressions. Must start with a lowercase letter and can include letters, numbers, and underscores. If not specified, Grafana generates an ID using the pattern `query[refId]` (for example, `queryA` for the first query row). |
|
||||
| **Period** | The minimum time interval, in seconds, between data points. The default is `auto`. Valid values are 1, 5, 10, 30, or any multiple of 60. When set to auto or left blank, Grafana calculates the period using time range in seconds / 2000, then rounds up to the next value (60, 300, 900, 3600, 21600, 86400) based on the [CloudWatch retention policy](https://aws.amazon.com/about-aws/whats-new/2016/11/cloudwatch-extends-metrics-retention-and-new-user-interface/). |
|
||||
| **Label** | _Optional_. Add a customized time series legend name. The label field overrides the default metric legend name using CloudWatch dynamic labels. Time-based dynamic labels like ${MIN_MAX_TIME_RANGE} derive legend values from the current timezone in the time range picker. For the full list of label patterns and limitations, refer to [CloudWatch dynamic labels](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html). |
|
||||
|
||||
## Use Builder mode
|
||||
|
||||
Create a query in Builder mode:
|
||||
|
||||
1. Browse and select a metric namespace, metric name, filter, group, and order options using information from the [Metrics Insights keywords table](#use-metrics-insights-keywords).
|
||||
1. For each of these keywords, choose from the list of available options.
|
||||
|
||||
Grafana constructs a SQL query based on your selections.
|
||||
|
||||
## Use Code mode
|
||||
|
||||
You can also write your SQL query directly in a code editor by using Code mode.
|
||||
|
||||
The code editor includes a built-in autocomplete feature that suggests keywords, aggregations, namespaces, metrics, labels, and label values.
|
||||
Suggestions appear after typing a space, comma, or dollar (`$`) character, or by pressing <key>CTRL</key>+<key>Space</key>.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Template variables in the code editor can interfere with autocompletion.
|
||||
{{< /admonition >}}
|
||||
|
||||
To run the query, click **Run query** above the code editor.
|
||||
|
||||
## Query CloudWatch metrics
|
||||
|
||||
You can build two types of queries with the CloudWatch query editor:
|
||||
You can create two types of queries in the CloudWatch query editor:
|
||||
|
||||
- [Metric Search](#create-a-metric-search-query)
|
||||
- [Metric Query](#create-a-metric-insights-query), which uses the Metrics Insights feature
|
||||
- [Metric Search](#metric-search-queries), which help you retrieve and filter available metrics.
|
||||
- [Metric Query](#use-metric-insights-syntax), which use the Metrics Insights feature to fetch time series data.
|
||||
|
||||
### Create a Metric Search query
|
||||
The query type you use depends on how you want to interact with AWS metrics. Use the drop-down in the upper middle of the query editor to select which type you want to create.
|
||||
|
||||
To create a valid Metric Search query, specify the namespace, metric name, and at least one statistic. Dimensions are optional, but for a dimension to be considered part of the query it must have both a key and a value.
|
||||
### Metric Search queries
|
||||
|
||||
Metric search queries help you discover and filter available metrics. These queries use wildcards and filters to find metrics without needing to know exact metric names.
|
||||
|
||||
A valid metric query requires a specified namespace, metric name, and at least one statistic. Dimensions are optional, but if included, you must provide both a `key` and a `value`.
|
||||
|
||||
The `Match Exact` option controls how dimension filtering is applied to metric queries. When you enable `Match Exact`, the query returns only metrics whose dimensions precisely match the specified criteria.
|
||||
|
||||
This requires the following:
|
||||
|
||||
- All dimensions present on the target metric must be explicitly specified.
|
||||
- Dimensions you don't want to filter by must use a wildcard (\*) filter.
|
||||
- The metric schema must match exactly as defined in the [CloudWatch metric schema](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/search-expression-syntax.html) documentation.
|
||||
|
||||
When `Match Exact` is disabled, you can specify any subset of dimensions for filtering. The query returns metrics that:
|
||||
|
||||
- Match the specified namespace and metric name.
|
||||
- Match all defined dimension filters.
|
||||
- May contain additional dimensions beyond those specified.
|
||||
|
||||
This mode provides more flexible querying but may return metrics with unexpected additional dimensions.
|
||||
|
||||
If you enable `Match Exact`, you must also specify all dimensions of the metric you're querying so that the [metric schema](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/search-expression-syntax.html) matches exactly. With `Match Exact` enabled the query only returns metrics that have the specified dimensions and no others, so dimensions that are on the metric but that are not being filtered on must be added as a wildcard (`*`) filter.
|
||||
If `Match Exact` is disabled, you can specify any number of dimensions on which you'd like to filter. With `Match Exact` disabled the query returns any metrics that match the namespace, metric name, and all defined dimensions, whether or not they have additional dimensions.
|
||||
The data source returns up to 100 metrics matching your filter criteria.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
Enhance metric queries using template variables to create dynamic, reusable dashboards.
|
||||
|
||||
#### Create dynamic queries with dimension wildcards
|
||||
|
||||
Use the asterisk (`*`) wildcard for one or more dimension values to monitor a dynamic list of metrics.
|
||||
Use the asterisk (`*`) wildcard for dimension values to create dynamic queries that automatically monitor changing sets of AWS resources.
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/cloudwatch-dimension-wildcard-8.3.0.png" max-width="500px" class="docs-image--right" caption="CloudWatch dimension wildcard" >}}
|
||||
{{< figure src="/static/img/docs/cloudwatch/cloudwatch-dimension-wildcard-8.3.0.png" max-width="500px" caption="CloudWatch dimension wildcard" >}}
|
||||
|
||||
In this example, the query returns all metrics in the namespace `AWS/EC2` with a metric name of `CPUUtilization`, and also queries ANY value for the `InstanceId` dimension.
|
||||
This can help you monitor metrics for AWS resources, like EC2 instances or containers.
|
||||
The query returns the average CPU utilization for all EC2 instances in the default region. With `Match Exact` disabled and `InstanceId` using a wildcard, the query retrieves metrics for any EC2 instance regardless of additional dimensions.
|
||||
|
||||
When an auto-scaling event creates new instances, they automatically appear in the graph without you having to track the new instance IDs.
|
||||
This capability is currently limited to retrieving up to 100 metrics.
|
||||
Auto-scaling events add new instances to the graph without manual instance ID tracking. This feature supports up to 100 metrics.
|
||||
|
||||
You can expand the [Query inspector](ref:query-transform-data-navigate-the-query-tab) button and click `Meta Data` to see the search expression that's automatically built to support wildcards.
|
||||
Click the [**Query inspector**](ref:query-transform-data-navigate-the-query-tab) button and select **Meta Data** to see the search expression that's automatically built to support wildcards.
|
||||
|
||||
To learn more about search expressions, refer to the [CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/search-expression-syntax.html).
|
||||
The search expression is defined by default in such a way that the queried metrics must match the defined dimension names exactly.
|
||||
This means that in the example, the query returns only metrics with exactly one dimension containing the name 'InstanceId'.
|
||||
This means that in the example, the query returns only metrics with exactly one dimension containing the name `InstanceId`.
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/cloudwatch-meta-inspector-8.3.0.png" max-width="500px" class="docs-image--right" caption="CloudWatch Meta Inspector" >}}
|
||||
|
||||
You can disable `Match Exact` to include metrics that have other dimensions defined.
|
||||
Disabling `Match Exact` also creates a search expression even if you don't use wildcards. We simply search for any metric that matches at least the namespace, metric name, and all defined dimensions.
|
||||
Disabling `Match Exact` includes metrics with additional dimensions and creates a search expression even without wildcards. Grafana searches for any metric matching at least the namespace, metric name, and all defined dimensions.
|
||||
|
||||
#### Use multi-value template variables
|
||||
|
||||
|
@ -100,47 +165,42 @@ This enables the use of multiple template variables in one query, and also lets
|
|||
Search expressions are limited to 1,024 characters, so your query might fail if you have a long list of values.
|
||||
We recommend using the asterisk (`*`) wildcard instead of the `All` option to query all metrics that have any value for a certain dimension name.
|
||||
|
||||
The use of multi-valued template variables is supported only for dimension values.
|
||||
Multi-valued template variables are supported only for dimension values.
|
||||
Using multi-valued template variables for `Region`, `Namespace`, or `Metric Name` is not supported.
|
||||
|
||||
#### Use metric math expressions
|
||||
|
||||
You can create new time series metrics by operating on top of CloudWatch metrics using mathematical functions.
|
||||
This includes support for arithmetic operators, unary subtraction, and other functions, and can be applied to CloudWatch metrics.
|
||||
For details on the available functions, refer to [AWS Metric Math](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html).
|
||||
Create new time series metrics using mathematical functions on CloudWatch metrics. This supports arithmetic operators, unary subtraction, and other functions. For available functions, refer to [AWS Metric Math](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html).
|
||||
|
||||
For example, to apply arithmetic operations to a metric, apply a unique string id to the raw metric, then use this id and apply arithmetic operations to it in the Expression field of the new metric.
|
||||
To apply arithmetic operations, assign a unique string ID to the raw metric, then reference this ID in the `Expression` field of the new metric.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use the expression field to reference another query, like `queryA * 2`, you can't create an alert rule based on that query.
|
||||
If you use the expression field to reference another query, such as `queryA * 2`, you can't create an alert rule based on that query.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Period macro
|
||||
### Query metrics across AWS monitoring accounts
|
||||
|
||||
If you're using a CloudWatch [`SEARCH`](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/search-expression-syntax.html) expression, you may want to use the `$__period_auto` macro rather than specifying a period explicitly. The `$__period_auto` macro will resolve to a [CloudWatch period](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) that is suitable for the chosen time range.
|
||||
When you select `Builder` mode within the Metric search editor, a new Account field is displayed. Use the `Account` field to specify which of the linked monitoring accounts to target for the given query. By default, the `All` option is specified, which will target all linked accounts.
|
||||
|
||||
#### Deep-link Grafana panels to the CloudWatch console
|
||||
While in `Code` mode, you can specify any math expression. If the Monitoring account badge displays in the query editor header, all `SEARCH` expressions entered in this field will be cross-account by default and can query metrics from linked accounts. Note that while queries run cross-account, the autocomplete feature currently doesn't fetch cross-account resources, so you'll need to manually specify resource names when writing cross-account queries.
|
||||
You can limit the search to one or a set of accounts, as documented in the [AWS documentation](http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
|
||||
|
||||
{{< figure src="/static/img/docs/v65/cloudwatch-deep-linking.png" max-width="500px" class="docs-image--right" caption="CloudWatch deep linking" >}}
|
||||
### Period macro
|
||||
|
||||
Left-clicking a time series in the panel shows a context menu with a link to `View in CloudWatch console`.
|
||||
Clicking that link opens a new tab that takes you to the CloudWatch console and displays all metrics for that query.
|
||||
If you use a CloudWatch [`SEARCH`](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/search-expression-syntax.html) expression, consider using the `$__period_auto` macro rather than specifying a period explicitly. The `$__period_auto` macro will resolve to a [CloudWatch period](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) that is suitable for the chosen time range.
|
||||
|
||||
### Deep-link Grafana panels to the CloudWatch console
|
||||
|
||||
Left-clicking a time series in the panel displays a context menu with a link to `View in CloudWatch console`.
|
||||
Clicking the link opens a new tab that takes you to the CloudWatch console and displays all metrics for that query.
|
||||
If you're not logged in to the CloudWatch console, the link forwards you to the login page.
|
||||
The provided link is valid for any account but displays the expected metrics only if you're logged in to the account that corresponds to the selected data source in Grafana.
|
||||
The link provided is valid for any account but displays the expected metrics only if you're logged in to the account that corresponds to the selected data source in Grafana.
|
||||
|
||||
{{< figure src="/media/docs/cloudwatch/cloudwatch-deep-link-v12.1.png" caption="CloudWatch deep linking" >}}
|
||||
|
||||
This feature is not available for metrics based on [metric math expressions](#metric-math-expressions).
|
||||
|
||||
### Create a Metric Insights query
|
||||
|
||||
The Metrics Query option in the CloudWatch data source is referred to as **Metric Insights** in the AWS console.
|
||||
It's a fast, flexible, SQL-based query engine that you can use to identify trends and patterns across millions of operational metrics in real time.
|
||||
|
||||
The metrics query editor's Metrics Query option has two editing modes:
|
||||
|
||||
- [Builder mode](#create-a-query-in-builder-mode), which provides a visual query-building interface
|
||||
- [Code mode](#create-a-query-in-code-mode), which provides a code editor for writing queries
|
||||
|
||||
#### Use Metric Insights syntax
|
||||
### Use Metric Insights syntax
|
||||
|
||||
Metric Insights uses a dialect of SQL and this query syntax:
|
||||
|
||||
|
@ -157,9 +217,9 @@ For details about the Metrics Insights syntax, refer to the [AWS reference docum
|
|||
|
||||
For information about Metrics Insights limits, refer to the [AWS feature documentation](https://docs.aws.amazon.com/console/cloudwatch/metricsinsights).
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
You can also augment queries by using [template variables](ref:add-template-variables).
|
||||
|
||||
#### Use Metrics Insights keywords
|
||||
### Use Metrics Insights keywords
|
||||
|
||||
This table summarizes common Metrics Insights query keywords:
|
||||
|
||||
|
@ -174,124 +234,87 @@ This table summarizes common Metrics Insights query keywords:
|
|||
| `ORDER BY` | Optional. Specifies the order in which time series are returned. Options are `ASC`, `DESC`. |
|
||||
| `LIMIT` | Optional. Limits the number of time series returned. |
|
||||
|
||||
#### Create a query in Builder mode
|
||||
|
||||
**To create a query in Builder mode:**
|
||||
|
||||
1. Browse and select a metric namespace, metric name, filter, group, and order options using information from the [Metrics Insights keywords table](#metrics-insights-keywords).
|
||||
1. For each of these keywords, choose from the list of possible options.
|
||||
|
||||
Grafana constructs a SQL query based on your selections.
|
||||
|
||||
#### Create a query in Code mode
|
||||
|
||||
You can also write your SQL query directly in a code editor by using Code mode.
|
||||
|
||||
The code editor has a built-in autocomplete feature that suggests keywords, aggregations, namespaces, metrics, labels, and label values.
|
||||
The suggestions appear after typing a space, comma, or dollar (`$`) character, or the keyboard combination <key>CTRL</key>+<key>Space</key>.
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/cloudwatch-code-editor-autocomplete-8.3.0.png" max-width="500px" class="docs-image--right" caption="Code editor autocomplete" >}}
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Template variables in the code editor can interfere with autocompletion.
|
||||
{{< /admonition >}}
|
||||
|
||||
To run the query, click **Run query** above the code editor.
|
||||
|
||||
### Common query editor fields
|
||||
|
||||
Three fields located at the bottom of the metrics query editor are common to both Metric Search and Metric Query editors.
|
||||
|
||||
#### Id
|
||||
|
||||
The GetMetricData API requires that all queries have a unique ID. Use this field to specify an ID of choice. The ID can include numbers, letters, and underscore, and must start with a lowercase letter. If no ID is specified, Grafana will generate an ID using the following pattern `query[refId of the current query row]`, such as `queryA` for the first query row in the panel editor.
|
||||
|
||||
The ID can be used to reference queries in Metric Math expressions.
|
||||
|
||||
#### Period
|
||||
|
||||
A period is the length of time associated with a specific Amazon CloudWatch statistic. Periods are defined in numbers of seconds, and valid values for period are 1, 5, 10, 30, or any multiple of 60.
|
||||
|
||||
If the period field is left blank or set to `auto`, then it calculates automatically based on the time range and [CloudWatch's retention policy](https://aws.amazon.com/about-aws/whats-new/2016/11/cloudwatch-extends-metrics-retention-and-new-user-interface/). The formula used is `time range in seconds / 2000`, and then it snaps to the next higher value in an array of predefined periods `[60, 300, 900, 3600, 21600, 86400]` after removing periods based on retention. By clicking `Show Query Preview` in the query editor, you can see what period Grafana used.
|
||||
|
||||
#### Label
|
||||
|
||||
The label field allows you to override the default name of the metric legend using CloudWatch dynamic labels. If you're using a time-based dynamic label such as `${MIN_MAX_TIME_RANGE}`, then the legend value is derived from the current timezone specified in the time range picker. To see the full list of label patterns and the dynamic label limitations, refer to the [CloudWatch dynamic labels](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html) documentation.
|
||||
|
||||
> **Alias pattern deprecation:** Since Grafana v10.0, the alias field has been deprecated and replaced by dynamic
|
||||
> labels.
|
||||
> Any existing alias pattern is migrated upon upgrade to a corresponding dynamic label pattern.
|
||||
> For details on this change, refer to [issue #48434](https://github.com/grafana/grafana/issues/48434).
|
||||
|
||||
## Query CloudWatch Logs
|
||||
|
||||
The logs query editor helps you write CloudWatch Logs Query Language queries across defined regions and log groups.
|
||||
It supports querying CloudWatch logs with Logs Insights Query Language, OpenSearch PPL and OpenSearch SQL.
|
||||
The logs query editor helps you write CloudWatch Logs Query Language queries across specified regions and log groups.
|
||||
|
||||
### Create a CloudWatch Logs query
|
||||
Use the Log group selector to choose the target log groups for your query. When the Monitoring account badge appears in the query editor header, you can search and select log groups across multiple accounts. Use the `Account` field to filter log groups by account, and for large numbers of log groups, use prefix search to narrow the selection.
|
||||
|
||||
1. Select the query language you would like to use in the Query Language dropdown.
|
||||
1. Select the region and up to 20 log groups to query.
|
||||
You can query CloudWatch Logs using three supported query language options:
|
||||
|
||||
- **Logs Insights QL** - The AWS native query language specifically designed for CloudWatch Logs. It uses a SQL-like syntax with commands like `fields`, `filter`, `stats`, and `sort`. It's optimized for the CloudWatch log structure and offers built-in functions for parsing timestamps, extracting fields from JSON logs, and performing aggregations.
|
||||
- **OpenSearch PPL** - The OpenSearch query language is based on Elasticsearch's query DSL (Domain Specific Language). It uses a pipe-based syntax similar to Unix command-line tools or the Splunk search language, and supports complex boolean logic, range queries, wildcard matching, and full-text search capabilities.
|
||||
- **OpenSearch SQL** - OpenSearch SQL is a query language that uses a SQL-like syntax for querying data in OpenSearch. It supports standard SQL queries and is designed for users familiar with SQL.
|
||||
|
||||
**Create a CloudWatch Logs query:**
|
||||
|
||||
1. Select a region.
|
||||
1. Select **CloudWatch Logs** from the query type drop-down.
|
||||
1. Select the query language you would like to use in the **Query Language** drop-down.
|
||||
1. Click **Select log groups** and choose up to 20 log groups to query.
|
||||
1. Use the main input area to write your logs query. Amazon CloudWatch only supports a subset of OpenSearch SQL and PPL commands. To find out more about the syntax supported, consult [Amazon CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData_Languages.html)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Region and log groups are mandatory fields when querying with Logs Insights QL and OpenSearch PPL. Log group selection is not necessary when querying with OpenSearch SQL. However, selecting log groups simplifies writing logs queries by populating syntax suggestions with discovered log group fields.
|
||||
You must specify the region and log groups when querying with **Logs Insights QL** and **OpenSearch PPL**. **OpenSearch SQL** doesn't require log group selection. However, selecting log groups simplifies query writing by populating syntax suggestions with discovered log group fields.
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Use the main input area to write your logs query. AWS CloudWatch only supports a subset of OpenSearch SQL and PPL commands. To find out more about the syntax supported, consult the [Amazon CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData_Languages.html).
|
||||
Click **CloudWatch Logs Insights** to interactively view, search, and analyze your log data in the CloudWatch Logs Insights console. If you're not logged in to the CloudWatch console, the link forwards you to the login page.
|
||||
|
||||
#### Querying Log groups with OpenSearch SQL
|
||||
### Query Log groups with OpenSearch SQL
|
||||
|
||||
When querying log groups with OpenSearch SQL, the log group identifier or ARN _must_ be explicitly stated in the `FROM` clause:
|
||||
When querying log groups with OpenSearch SQL, you **must** explicitly state the log group identifier or ARN in the `FROM` clause:
|
||||
|
||||
```sql
|
||||
SELECT window.start, COUNT(*) AS exceptionCount
|
||||
FROM `log_group`
|
||||
WHERE `@message` LIKE '%Exception%'
|
||||
```
|
||||
```sql
|
||||
SELECT window.start, COUNT(*) AS exceptionCount
|
||||
FROM `log_group`
|
||||
WHERE `@message` LIKE '%Exception%'
|
||||
```
|
||||
|
||||
or, when querying multiple log groups:
|
||||
or, when querying multiple log groups:
|
||||
|
||||
```sql
|
||||
SELECT window.start, COUNT(*) AS exceptionCount
|
||||
FROM `logGroups( logGroupIdentifier: ['LogGroup1', 'LogGroup2'])`
|
||||
WHERE `@message` LIKE '%Exception%'
|
||||
```
|
||||
```sql
|
||||
SELECT window.start, COUNT(*) AS exceptionCount
|
||||
FROM `logGroups( logGroupIdentifier: ['LogGroup1', 'LogGroup2'])`
|
||||
WHERE `@message` LIKE '%Exception%'
|
||||
```
|
||||
|
||||
You can also write queries returning time series data by using the [`stats` command](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_Insights-Visualizing-Log-Data.html).
|
||||
When making `stats` queries in [Explore](ref:explore), make sure you are in Metrics Explore mode.
|
||||
When making `stats` queries in [Explore](ref:explore), ensure you are in Metrics Explore mode.
|
||||
|
||||
### Create queries for alerting
|
||||
|
||||
Alerting requires queries that return numeric data, which CloudWatch Logs supports.
|
||||
For example, you can enable alerts through the use of the `stats` command.
|
||||
|
||||
The following is a valid query for alerting on messages that include the text "Exception":
|
||||
|
||||
```
|
||||
filter @message like /Exception/
|
||||
| stats count(*) as exceptionCount by bin(1h)
|
||||
| sort exceptionCount desc
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you receive an error like `input data must be a wide series but got ...` when trying to alert on a query, make sure that your query returns valid numeric data that can be output to a Time series panel.
|
||||
{{< /admonition >}}
|
||||
|
||||
For more information on Grafana alerts, refer to [Alerting](ref:alerting).
|
||||
|
||||
## Cross-account observability
|
||||
|
||||
The CloudWatch plugin allows monitoring and troubleshooting applications that span multiple accounts within a region. Using cross-account observability, you can seamlessly search, visualize, and analyze metrics and logs without worrying about account boundaries.
|
||||
|
||||
### Get started
|
||||
The CloudWatch plugin monitors and troubleshoots applications that span multiple accounts within a region. Cross-account observability enables seamless searching, visualization, and analysis of metrics and logs across account boundaries.
|
||||
|
||||
To enable cross-account observability, complete the following steps:
|
||||
|
||||
1. Go to the [Amazon CloudWatch docs](http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) and follow the instructions on enabling cross-account observability.
|
||||
1. Go to the [Amazon CloudWatch documentation](http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) and follow the instructions for enabling cross-account observability.
|
||||
|
||||
1. Add [two API actions](../#cross-account-observability-permissions) to the IAM policy attached to the role/user running the plugin.
|
||||
1. Add [two API actions](https://grafana.com/docs/grafana/latest/datasources/aws-cloudwatch/configure/#cross-account-observability-permissions) to the IAM policy attached to the role/user running the plugin.
|
||||
|
||||
Cross-account querying is available in the plugin through the **Logs**, **Metric search**, and **Metric Insights** modes.
|
||||
After you have it configured, you'll see a **Monitoring account** badge in the query editor header.
|
||||
After you have configured it, you'll see a **Monitoring account** badge in the query editor header.
|
||||
|
||||
{{< figure src="/static/img/docs/cloudwatch/cloudwatch-monitoring-badge-9.3.0.png" max-width="1200px" caption="Monitoring account badge" >}}
|
||||
|
||||
### Metrics editor
|
||||
## Query caching
|
||||
|
||||
When you select the `Builder` mode within the Metric search editor, a new Account field displays. Use the Account field to specify which of the linked accounts to target for the given query. By default, the `All` option is specified, which will target all linked accounts.
|
||||
|
||||
While in `Code` mode, you can specify any math expression. If the Monitoring account badge displays in the query editor header, all `SEARCH` expressions entered in this field will be cross-account by default. You can limit the search to one or a set of accounts, as documented in the [AWS documentation](http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
|
||||
|
||||
### Logs editor
|
||||
|
||||
The Log group selector allows you to specify what log groups to target in the logs query. If the Monitoring account badge is displayed in the query editor header, it is possible to search and select log groups across multiple accounts. You can use the Account field in the Log Group Selector to filter Log Groups by Account. If you have many log groups and do not see the log group you'd like to select in the selector, use the prefix search to narrow down the possible log groups.
|
||||
|
||||
### Deep-link Grafana panels to the CloudWatch console
|
||||
|
||||
{{< figure src="/static/img/docs/v70/cloudwatch-logs-deep-linking.png" max-width="500px" class="docs-image--right" caption="CloudWatch Logs deep linking" >}}
|
||||
|
||||
To view your query in the CloudWatch Logs Insights console, click the `CloudWatch Logs Insights` button next to the query editor.
|
||||
If you're not logged in to the CloudWatch console, the link forwards you to the login page.
|
||||
|
||||
The provided link is valid for any account, but displays the expected metrics only if you're logged in to the account that corresponds to the selected data source in Grafana.
|
||||
When you enable [query and resource caching](/docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/#query-and-resource-caching), Grafana temporarily stores the results of data source queries and resource requests. Query caching is available in CloudWatch Metrics in Grafana Cloud and Grafana Enterprise. It is not available in CloudWatch Logs Insights due to how query results are polled from AWS.
|
||||
|
|
|
@ -16,7 +16,7 @@ labels:
|
|||
- oss
|
||||
menuTitle: Template variables
|
||||
title: CloudWatch template variables
|
||||
weight: 400
|
||||
weight: 300
|
||||
refs:
|
||||
variable-syntax:
|
||||
- pattern: /docs/grafana/
|
||||
|
@ -38,10 +38,11 @@ refs:
|
|||
# CloudWatch template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
Grafana lists these variables in drop-down select boxes at the top of the dashboard to help you change the data displayed in your dashboard, and they are called template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
<!-- Grafana refers to such variables as template variables. -->
|
||||
|
||||
For an introduction to templating and template variables, refer to [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
|
||||
|
||||
## Use query variables
|
||||
|
||||
|
@ -67,20 +68,23 @@ For details about the metrics CloudWatch provides, refer to the [CloudWatch docu
|
|||
|
||||
### Use variables in queries
|
||||
|
||||
Use Grafana's variable syntax to include variables in queries.
|
||||
Use the Grafana variable syntax to include variables in queries. A query variable dynamically retrieves values from your data source using a query.
|
||||
For details, refer to the [variable syntax documentation](ref:variable-syntax).
|
||||
|
||||
## Use ec2_instance_attribute
|
||||
|
||||
The `ec2_instance_attribute` function in template variables allows Grafana to retrieve certain instance metadata from the EC2 metadata service, including `Instance ID` and `region`.
|
||||
|
||||
### Filters
|
||||
|
||||
The `ec2_instance_attribute` query takes `filters` as a filter name and a comma-separated list of values.
|
||||
The `ec2_instance_attribute` query takes a `filters` parameter, where each key is a filter name (such as a tag or instance property), and each value is a comma-separated list of matching values.
|
||||
|
||||
You can specify [pre-defined filters of ec2:DescribeInstances](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html).
|
||||
|
||||
### Select attributes
|
||||
|
||||
A query returns only one attribute per instance.
|
||||
You can select any attribute that has a single value and isn't an object or array, also known as a flat attribute:
|
||||
You can select any attribute that has a single value and isn't an object or array, also known as a `flat attribute`:
|
||||
|
||||
- `AmiLaunchIndex`
|
||||
- `Architecture`
|
||||
|
|
|
@ -130,6 +130,10 @@ Grafana can resolve a user's login from the OAuth2 ID token, user information re
|
|||
Grafana looks at these sources in the order listed until it finds a login.
|
||||
If no login is found, then the user's login is set to the user's email address.
|
||||
|
||||
{{< admonition type="important" >}}
|
||||
Email is required for successful sign-up and login with Generic OAuth. Even if you map `login` from another claim (for example `sub`), Grafana still requires the user to have an email. Ensure your provider returns an email claim or configure `email_attribute_path` so Grafana can resolve it. Including the `email` scope is strongly recommended (for OIDC providers use `openid profile email`).
|
||||
{{< /admonition >}}
|
||||
|
||||
Refer to the following table for information on what to configure based on how your Oauth2 provider returns a user's login:
|
||||
|
||||
| Source of login | Required configuration |
|
||||
|
@ -141,6 +145,21 @@ Refer to the following table for information on what to configure based on how y
|
|||
| `login` or `username` field of the OAuth2 access token. | N/A |
|
||||
| Another field of the OAuth2 access token. | Set `login_attribute_path` configuration option. |
|
||||
|
||||
#### Use the `sub` claim for login
|
||||
|
||||
Most of the OAuth2 providers expose a stable subject identifier in the `sub` claim. You can use it to populate the Grafana login by setting `login_attribute_path` to `sub`. Because email is still required, also make sure Grafana can resolve the user's email (for example by including the `email` scope or mapping a custom field via `email_attribute_path`).
|
||||
|
||||
Example configuration:
|
||||
|
||||
```ini
|
||||
[auth.generic_oauth]
|
||||
enabled = true
|
||||
scopes = openid profile email
|
||||
login_attribute_path = sub
|
||||
# If your provider does not return `email` at the top level, map it explicitly
|
||||
# email_attribute_path = user.email
|
||||
```
|
||||
|
||||
### Configure display name
|
||||
|
||||
Grafana can resolve a user's display name from the OAuth2 ID token, user information retrieved from the OAuth2 UserInfo endpoint, or the OAuth2 access token.
|
||||
|
|
2
go.mod
2
go.mod
|
@ -86,7 +86,7 @@ require (
|
|||
github.com/googleapis/gax-go/v2 v2.14.2 // @grafana/grafana-backend-group
|
||||
github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 // @grafana/alerting-backend
|
||||
github.com/grafana/alerting v0.0.0-20251007160934-e642236ea9eb // @grafana/alerting-backend
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team
|
||||
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 // @grafana/identity-access-team
|
||||
github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics
|
||||
|
|
4
go.sum
4
go.sum
|
@ -1585,8 +1585,8 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
|
|||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165 h1:wfehM99Xlpltl9MQx8SITkgFgHmPGqrXoBCVLk/Q6NA=
|
||||
github.com/grafana/alerting v0.0.0-20251002001425-eeed80da0165/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
|
||||
github.com/grafana/alerting v0.0.0-20251007160934-e642236ea9eb h1:ejpL3pI9t1ebEFtxaP7TMDKZagCu1nSq1O8op+sO4DY=
|
||||
github.com/grafana/alerting v0.0.0-20251007160934-e642236ea9eb/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
|
||||
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37 h1:qEwZ+7MbPjzRvTi31iT9w7NBhKIpKwZrFbYmOZLqkwA=
|
||||
|
|
|
@ -1629,6 +1629,7 @@ github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
|
|||
github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
|
||||
github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
|
||||
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
|
||||
|
|
|
@ -0,0 +1,117 @@
|
|||
const fs = require('fs');
const path = require('path');

const baseConfig = require('./jest.config.js');

const CODEOWNERS_MANIFEST_FILENAMES_BY_TEAM_PATH = 'codeowners-manifest/filenames-by-team.json';

// Single source of truth for which extensions count as JS/TS scripts.
// Previously this list was duplicated in both file filters below.
const SCRIPT_EXTENSIONS = new Set(['.ts', '.tsx', '.js', '.jsx']);

/**
 * Whether a manifest entry is a JS/TS script, judged by its extension.
 * @param {string} file - Repo-relative file path from the codeowners manifest
 * @returns {boolean}
 */
function isScript(file) {
  return SCRIPT_EXTENSIONS.has(path.extname(file));
}

/**
 * Whether a file follows the Jest test naming convention (`.test.` / `.spec.`).
 * @param {string} file - Repo-relative file path
 * @returns {boolean}
 */
function isTestFile(file) {
  return file.includes('.test.') || file.includes('.spec.');
}

// The team whose files we collect coverage for; required, no default.
const teamName = process.env.TEAM_NAME;
if (!teamName) {
  console.error('ERROR: TEAM_NAME environment variable is required');
  process.exit(1);
}

// Per-team output directory; createOwnerDirectory (defined below, hoisted)
// converts the CODEOWNERS owner string into a filesystem-safe path.
const outputDir = `./coverage/by-team/${createOwnerDirectory(teamName)}`;

const codeownersFilePath = path.join(__dirname, CODEOWNERS_MANIFEST_FILENAMES_BY_TEAM_PATH);

if (!fs.existsSync(codeownersFilePath)) {
  console.error(`Codeowners file not found at ${codeownersFilePath} ...`);
  console.error('Please run: yarn codeowners-manifest first to generate the mapping file');
  process.exit(1);
}

// Manifest maps team name -> array of repo-relative file paths.
const codeownersData = JSON.parse(fs.readFileSync(codeownersFilePath, 'utf8'));
const teamFiles = codeownersData[teamName] || [];

if (teamFiles.length === 0) {
  console.error(`ERROR: No files found for team "${teamName}"`);
  console.error('Available teams:', Object.keys(codeownersData).join(', '));
  process.exit(1);
}

// Production sources: scripts that are not tests, stories, generated code,
// declaration files, or pure type modules.
const sourceFiles = teamFiles.filter(
  (file) =>
    isScript(file) &&
    !isTestFile(file) &&
    !file.includes('.story.') &&
    !file.includes('.gen.ts') &&
    !file.includes('.d.ts') &&
    !file.endsWith('/types.ts')
);

// Test files owned by the team; these become the Jest testMatch list.
const testFiles = teamFiles.filter((file) => isScript(file) && isTestFile(file));

if (testFiles.length === 0) {
  // Nothing to run is not an error: exit cleanly so CI doesn't fail.
  console.log(`No test files found for team ${teamName}`);
  process.exit(0);
}

console.log(
  `🧪 Collecting coverage for ${sourceFiles.length} testable files and running ${testFiles.length} test files of ${teamFiles.length} files owned by ${teamName}.`
);
|
||||
|
||||
module.exports = {
|
||||
...baseConfig,
|
||||
|
||||
collectCoverage: true,
|
||||
collectCoverageFrom: sourceFiles.map((file) => `<rootDir>/${file}`),
|
||||
coverageReporters: ['none'],
|
||||
coverageDirectory: '/tmp/jest-coverage-ignore',
|
||||
|
||||
coverageProvider: 'v8',
|
||||
reporters: [
|
||||
'default',
|
||||
[
|
||||
'jest-monocart-coverage',
|
||||
{
|
||||
name: `Coverage Report - ${teamName} owned files`,
|
||||
outputDir: outputDir,
|
||||
reports: ['console-summary', 'v8', 'json', 'lcov'],
|
||||
sourceFilter: (coveredFile) => sourceFiles.includes(coveredFile),
|
||||
all: {
|
||||
dir: ['./packages', './public'],
|
||||
filter: (filePath) => {
|
||||
const relativePath = filePath.replace(process.cwd() + '/', '');
|
||||
return sourceFiles.includes(relativePath);
|
||||
},
|
||||
},
|
||||
cleanCache: true,
|
||||
onEnd: (coverageResults) => {
|
||||
console.log(`📄 Coverage report saved to file://${path.resolve(outputDir)}/index.html`);
|
||||
// TODO: Emit coverage metrics https://github.com/grafana/grafana/issues/111208
|
||||
},
|
||||
},
|
||||
],
|
||||
],
|
||||
|
||||
testRegex: undefined,
|
||||
|
||||
testMatch: testFiles.map((file) => `<rootDir>/${file}`),
|
||||
};
|
||||
|
||||
/**
 * Create a filesystem-safe directory structure for different owner types.
 *
 * @param {string} owner - CODEOWNERS owner (username, team, or email)
 * @returns {string} Directory path relative to coverage/by-team/
 */
function createOwnerDirectory(owner) {
  // Teams are always written as `@org/team`, so anchor on the leading `@`.
  // (The previous `owner.includes('@')` check would misclassify an e-mail
  // address whose local part contains a `/` as a team.)
  if (owner.startsWith('@') && owner.includes('/')) {
    // Example: @grafana/dataviz-squad -> teams/grafana/dataviz-squad
    const [org, team] = owner.substring(1).split('/');
    return `teams/${org}/${team}`;
  } else if (owner.startsWith('@')) {
    // Example: @jesdavpet -> users/jesdavpet
    return `users/${owner.substring(1)}`;
  } else {
    // Example: user@domain.tld -> emails/user-at-domain.tld
    const [user, domain] = owner.split('@');
    return `emails/${user}-at-${domain}`;
  }
}
|
|
@ -10,6 +10,11 @@
|
|||
"build": "NODE_ENV=production nx exec --verbose -- webpack --config scripts/webpack/webpack.prod.js",
|
||||
"build:nominify": "yarn run build -- --env noMinify=1",
|
||||
"build:stats": "NODE_ENV=production webpack --progress --config scripts/webpack/webpack.stats.js",
|
||||
"codeowners-manifest": "node ./scripts/codeowners-manifest/index.js",
|
||||
"codeowners-manifest:clean": "rm -rf codeowners-manifest && mkdir -p codeowners-manifest",
|
||||
"codeowners-manifest:generate": "node ./scripts/codeowners-manifest/generate.js",
|
||||
"codeowners-manifest:metadata": "node ./scripts/codeowners-manifest/metadata.js",
|
||||
"codeowners-manifest:raw": "node ./scripts/codeowners-manifest/raw.js",
|
||||
"dev": "NODE_ENV=dev nx exec -- webpack --config scripts/webpack/webpack.dev.js",
|
||||
"e2e": "./e2e/start-and-run-suite",
|
||||
"e2e:old-arch": "./e2e/start-and-run-suite old-arch",
|
||||
|
@ -30,6 +35,7 @@
|
|||
"e2e:plugin:build:dev": "nx run-many -t dev --projects='@test-plugins/*' --maxParallel=100",
|
||||
"test": "jest --notify --watch",
|
||||
"test:coverage": "jest --coverage",
|
||||
"test:coverage:by-codeowner": "yarn codeowners-manifest && node scripts/test-coverage-by-codeowner.js",
|
||||
"test:storybook": "yarn workspace @grafana/ui storybook:test",
|
||||
"test:coverage:changes": "jest --coverage --changedSince=origin/main",
|
||||
"test:accessibility-report": "./scripts/generate-a11y-report.sh",
|
||||
|
@ -198,6 +204,7 @@
|
|||
"expose-loader": "5.0.1",
|
||||
"fishery": "^2.2.2",
|
||||
"fork-ts-checker-webpack-plugin": "9.1.0",
|
||||
"github-codeowners": "^0.2.1",
|
||||
"glob": "11.0.3",
|
||||
"html-loader": "5.1.0",
|
||||
"html-webpack-plugin": "5.6.3",
|
||||
|
@ -211,6 +218,7 @@
|
|||
"jest-fail-on-console": "3.3.1",
|
||||
"jest-junit": "16.0.0",
|
||||
"jest-matcher-utils": "29.7.0",
|
||||
"jest-monocart-coverage": "^1.1.1",
|
||||
"jest-watch-typeahead": "^2.2.2",
|
||||
"jimp": "^1.6.0",
|
||||
"jsdom-testing-mocks": "^1.13.1",
|
||||
|
|
|
@ -429,18 +429,20 @@ export function sortDataFrame(data: DataFrame, sortIndex?: number, reverse = fal
|
|||
|
||||
return {
|
||||
...data,
|
||||
fields: data.fields.map((f) => {
|
||||
const newF = {
|
||||
...f,
|
||||
values: f.values.map((v, i) => f.values[index[i]]),
|
||||
fields: data.fields.map((field) => {
|
||||
const newValues = Array.from({ length: field.values.length }, (_, i) => field.values[index[i]]);
|
||||
|
||||
const newField = {
|
||||
...field,
|
||||
values: newValues,
|
||||
};
|
||||
|
||||
// only add .nanos if it exists
|
||||
const { nanos } = f;
|
||||
const { nanos } = field;
|
||||
if (nanos !== undefined) {
|
||||
newF.nanos = nanos.map((n, i) => nanos[index[i]]);
|
||||
newField.nanos = Array.from({ length: nanos.length }, (_, i) => nanos[index[i]]);
|
||||
}
|
||||
return newF;
|
||||
return newField;
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
|
|
@ -323,6 +323,10 @@ export interface FeatureToggles {
|
|||
*/
|
||||
queryService?: boolean;
|
||||
/**
|
||||
* Adds datasource connections to the query service
|
||||
*/
|
||||
queryServiceWithConnections?: boolean;
|
||||
/**
|
||||
* Rewrite requests targeting /ds/query to the query service
|
||||
*/
|
||||
queryServiceRewrite?: boolean;
|
||||
|
|
|
@ -1,17 +1,23 @@
|
|||
import { css } from '@emotion/css';
|
||||
import { useState } from 'react';
|
||||
|
||||
import { TableCellDisplayMode } from '../../types';
|
||||
import { MaybeWrapWithLink } from '../components/MaybeWrapWithLink';
|
||||
import { ImageCellProps, TableCellStyles } from '../types';
|
||||
|
||||
export const ImageCell = ({ cellOptions, field, value, rowIdx }: ImageCellProps) => {
|
||||
const [error, setError] = useState(false);
|
||||
const { text } = field.display!(value);
|
||||
const { alt, title } =
|
||||
cellOptions.type === TableCellDisplayMode.Image ? cellOptions : { alt: undefined, title: undefined };
|
||||
|
||||
if (!text) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<MaybeWrapWithLink field={field} rowIdx={rowIdx}>
|
||||
<img alt={alt} src={text} title={title} />
|
||||
{error ? text : <img alt={alt} src={text} title={title} onError={() => setError(true)} />}
|
||||
</MaybeWrapWithLink>
|
||||
);
|
||||
};
|
||||
|
|
|
@ -102,6 +102,52 @@ describe('PillCell', () => {
|
|||
);
|
||||
});
|
||||
|
||||
it('FieldType.other with array', () => {
|
||||
const field = fieldWithValues([['value1', 'value2', 'value3']]);
|
||||
field.type = FieldType.other;
|
||||
expectHTML(
|
||||
render(
|
||||
<PillCell getTextColorForBackground={getTextColorForBackground} field={field} rowIdx={0} theme={theme} />
|
||||
),
|
||||
`
|
||||
<span style="background-color: rgb(63, 43, 91); color: rgb(247, 248, 250);">value1</span>
|
||||
<span style="background-color: rgb(252, 226, 222); color: rgb(32, 34, 38);">value2</span>
|
||||
<span style="background-color: rgb(81, 149, 206); color: rgb(247, 248, 250);">value3</span>
|
||||
`
|
||||
);
|
||||
});
|
||||
|
||||
it('FieldType.other with array with some null values', () => {
|
||||
const field = fieldWithValues([['value1', null, 'value2', undefined, 'value3']]);
|
||||
field.type = FieldType.other;
|
||||
expectHTML(
|
||||
render(
|
||||
<PillCell getTextColorForBackground={getTextColorForBackground} field={field} rowIdx={0} theme={theme} />
|
||||
),
|
||||
`
|
||||
<span style="background-color: rgb(63, 43, 91); color: rgb(247, 248, 250);">value1</span>
|
||||
<span style="background-color: rgb(252, 226, 222); color: rgb(32, 34, 38);">value2</span>
|
||||
<span style="background-color: rgb(81, 149, 206); color: rgb(247, 248, 250);">value3</span>
|
||||
`
|
||||
);
|
||||
});
|
||||
|
||||
it('FieldType.other with non-array', () => {
|
||||
const field = fieldWithValues([{ value1: true, value2: false, value3: 42 }]);
|
||||
field.type = FieldType.other;
|
||||
expectHTML(
|
||||
render(
|
||||
<PillCell
|
||||
getTextColorForBackground={getTextColorForBackground}
|
||||
field={fieldWithValues([])}
|
||||
rowIdx={0}
|
||||
theme={theme}
|
||||
/>
|
||||
),
|
||||
''
|
||||
);
|
||||
});
|
||||
|
||||
it('non-string values', () => {
|
||||
expectHTML(
|
||||
render(
|
||||
|
|
|
@ -65,6 +65,10 @@ export function inferPills(rawValue: TableCellValue): unknown[] {
|
|||
return [];
|
||||
}
|
||||
|
||||
if (Array.isArray(rawValue)) {
|
||||
return rawValue.filter((v) => v != null).map((v) => String(v).trim());
|
||||
}
|
||||
|
||||
const value = String(rawValue);
|
||||
|
||||
if (value[0] === '[') {
|
||||
|
|
|
@ -118,7 +118,9 @@ const CELL_REGISTRY: Record<TableCellOptions['type'], CellRegistryEntry> = {
|
|||
/>
|
||||
)),
|
||||
getStyles: getPillStyles,
|
||||
testField: (field: Field) => field.type === FieldType.string,
|
||||
testField: (field: Field) =>
|
||||
field.type === FieldType.string ||
|
||||
(field.type === FieldType.other && field.values.some((val) => Array.isArray(val))),
|
||||
},
|
||||
[TableCellDisplayMode.Markdown]: {
|
||||
// eslint-disable-next-line react/display-name
|
||||
|
|
|
@ -148,13 +148,16 @@ export const getDefaultCellStyles: TableCellStyles = (theme, { textAlign, should
|
|||
...(shouldOverflow && { minHeight: '100%' }),
|
||||
|
||||
[getActiveCellSelector()]: {
|
||||
'.table-cell-actions': { display: 'flex' },
|
||||
...(shouldOverflow && {
|
||||
zIndex: theme.zIndex.tooltip - 2,
|
||||
height: 'fit-content',
|
||||
minWidth: 'fit-content',
|
||||
}),
|
||||
},
|
||||
|
||||
[getHoverOnlyCellSelector()]: {
|
||||
'.table-cell-actions': { display: 'flex' },
|
||||
},
|
||||
});
|
||||
|
||||
export const getMaxHeightCellStyles: TableCellStyles = (_theme, { textAlign, maxHeight }) =>
|
||||
|
@ -252,3 +255,10 @@ export const getActiveCellSelector = memoize((isNested?: boolean) => {
|
|||
}
|
||||
return selectors.join(', ');
|
||||
});
|
||||
|
||||
export const getHoverOnlyCellSelector = memoize((isNested?: boolean) => {
|
||||
if (IS_SAFARI_26) {
|
||||
return '';
|
||||
}
|
||||
return ACTIVE_CELL_SELECTORS.hover[isNested ? 'nested' : 'normal'];
|
||||
});
|
||||
|
|
|
@ -5,9 +5,9 @@ import (
|
|||
"net/http"
|
||||
"path"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
aggregationv0alpha1 "github.com/grafana/grafana/pkg/aggregator/apis/aggregation/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/aggregator/apiserver/plugin/admission"
|
||||
)
|
||||
|
@ -64,7 +64,7 @@ func (h *PluginHandler) registerRoutes() {
|
|||
case aggregationv0alpha1.DataSourceProxyServiceType:
|
||||
// TODO: implement in future PR
|
||||
case aggregationv0alpha1.QueryServiceType:
|
||||
h.mux.Handle(proxyPath("/namespaces/{namespace}/connections/{uid}/query"), h.QueryDataHandler())
|
||||
h.mux.Handle(proxyPath("/namespaces/{namespace}/datasources/{uid}/query"), h.QueryDataHandler())
|
||||
case aggregationv0alpha1.RouteServiceType:
|
||||
// TODO: implement in future PR
|
||||
case aggregationv0alpha1.StreamServiceType:
|
||||
|
|
|
@ -6,15 +6,15 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1"
|
||||
grafanasemconv "github.com/grafana/grafana/pkg/semconv"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
|
||||
"k8s.io/component-base/tracing"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1"
|
||||
aggregationv0alpha1 "github.com/grafana/grafana/pkg/aggregator/apis/aggregation/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/aggregator/apiserver/util"
|
||||
grafanasemconv "github.com/grafana/grafana/pkg/semconv"
|
||||
)
|
||||
|
||||
func (h *PluginHandler) QueryDataHandler() http.HandlerFunc {
|
||||
|
|
|
@ -10,13 +10,14 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
datav0alpha1 "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/aggregator/apis/aggregation/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/aggregator/apiserver/plugin/fakes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQueryDataHandler(t *testing.T) {
|
||||
|
@ -87,7 +88,7 @@ func TestQueryDataHandler(t *testing.T) {
|
|||
buf := bytes.NewBuffer(nil)
|
||||
assert.NoError(t, json.NewEncoder(buf).Encode(qdr))
|
||||
|
||||
req, err := http.NewRequest("POST", "/apis/testds.example.com/v1/namespaces/default/connections/123/query", buf)
|
||||
req, err := http.NewRequest("POST", "/apis/testds.example.com/v1/namespaces/default/datasources/123/query", buf)
|
||||
assert.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
@ -113,7 +114,7 @@ func TestQueryDataHandler(t *testing.T) {
|
|||
buf := bytes.NewBuffer(nil)
|
||||
assert.NoError(t, json.NewEncoder(buf).Encode(qdr))
|
||||
|
||||
req, err := http.NewRequest("POST", "/apis/testds.example.com/v1/namespaces/default/connections/123/query", buf)
|
||||
req, err := http.NewRequest("POST", "/apis/testds.example.com/v1/namespaces/default/datasources/123/query", buf)
|
||||
assert.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
@ -141,7 +142,7 @@ func TestQueryDataHandler(t *testing.T) {
|
|||
buf := bytes.NewBuffer(nil)
|
||||
assert.NoError(t, json.NewEncoder(buf).Encode(qdr))
|
||||
|
||||
req, err := http.NewRequest("POST", "/apis/testds.example.com/v1/namespaces/default/connections/abc/query", buf)
|
||||
req, err := http.NewRequest("POST", "/apis/testds.example.com/v1/namespaces/default/datasources/abc/query", buf)
|
||||
assert.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
@ -165,7 +166,7 @@ func TestQueryDataHandler(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("should return delegate response if group does not match", func(t *testing.T) {
|
||||
req, err := http.NewRequest("POST", "/apis/wrongds.example.com/v1/namespaces/default/connections/abc/query", bytes.NewBuffer(nil))
|
||||
req, err := http.NewRequest("POST", "/apis/wrongds.example.com/v1/namespaces/default/datasources/abc/query", bytes.NewBuffer(nil))
|
||||
assert.NoError(t, err)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
package v0alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSource struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
// DataSource configuration -- these properties are all visible
|
||||
// to anyone able to query the data source from their browser
|
||||
Spec UnstructuredSpec `json:"spec"`
|
||||
|
||||
// Secure values allows setting values that are never shown to users
|
||||
// The returned properties are only the names of the configured values
|
||||
Secure common.InlineSecureValues `json:"secure,omitzero,omitempty"`
|
||||
}
|
||||
|
||||
// DsAccess represents how the datasource connects to the remote service
|
||||
// +k8s:openapi-gen=true
|
||||
// +enum
|
||||
type DsAccess string
|
||||
|
||||
const (
|
||||
// The frontend can connect directly to the remote URL
|
||||
// This method is discouraged
|
||||
DsAccessDirect DsAccess = "direct"
|
||||
|
||||
// Connect to the remote datasource through the grafana backend
|
||||
DsAccessProxy DsAccess = "proxy"
|
||||
)
|
||||
|
||||
func (dsa DsAccess) String() string {
|
||||
return string(dsa)
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type GenericDataSourceSpec struct {
|
||||
// The display name (previously saved as the "name" property)
|
||||
Title string `json:"title"`
|
||||
|
||||
Access DsAccess `json:"access,omitempty"`
|
||||
ReadOnly bool `json:"readOnly,omitempty"`
|
||||
IsDefault bool `json:"isDefault,omitempty"`
|
||||
|
||||
// Server URL
|
||||
URL string `json:"url,omitempty"`
|
||||
|
||||
User string `json:"user,omitempty"`
|
||||
Database string `json:"database,omitempty"`
|
||||
BasicAuth bool `json:"basicAuth,omitempty"`
|
||||
BasicAuthUser string `json:"basicAuthUser,omitempty"`
|
||||
WithCredentials bool `json:"withCredentials,omitempty"`
|
||||
|
||||
// Generic unstructured configuration settings
|
||||
JsonData common.Unstructured `json:"jsonData,omitzero"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
||||
Items []DataSource `json:"items"`
|
||||
}
|
|
@ -1,6 +1,3 @@
|
|||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +groupName=datasource.grafana.com
|
||||
|
||||
package v0alpha1
|
||||
|
|
|
@ -4,9 +4,10 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -14,26 +15,24 @@ const (
|
|||
VERSION = "v0alpha1"
|
||||
)
|
||||
|
||||
var GenericConnectionResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
|
||||
"connections", "connection", "DataSourceConnection",
|
||||
func() runtime.Object { return &DataSourceConnection{} },
|
||||
func() runtime.Object { return &DataSourceConnectionList{} },
|
||||
var DataSourceResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
|
||||
"datasources", "datasource", "DataSource",
|
||||
func() runtime.Object { return &DataSource{} },
|
||||
func() runtime.Object { return &DataSourceList{} },
|
||||
utils.TableColumns{
|
||||
Definition: []metav1.TableColumnDefinition{
|
||||
{Name: "Name", Type: "string", Format: "name"},
|
||||
{Name: "Title", Type: "string", Format: "string", Description: "The datasource title"},
|
||||
{Name: "APIVersion", Type: "string", Format: "string", Description: "API Version"},
|
||||
{Name: "Title", Type: "string", Format: "string", Description: "Title"},
|
||||
{Name: "Created At", Type: "date"},
|
||||
},
|
||||
Reader: func(obj any) ([]interface{}, error) {
|
||||
m, ok := obj.(*DataSourceConnection)
|
||||
Reader: func(obj any) ([]any, error) {
|
||||
m, ok := obj.(*DataSource)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected connection")
|
||||
}
|
||||
return []interface{}{
|
||||
return []any{
|
||||
m.Name,
|
||||
m.Title,
|
||||
m.APIVersion,
|
||||
m.Spec.Object["title"],
|
||||
m.CreationTimestamp.UTC().Format(time.RFC3339),
|
||||
}, nil
|
||||
},
|
||||
|
|
|
@ -6,26 +6,8 @@ import (
|
|||
common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceConnection struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
// The display name
|
||||
Title string `json:"title"`
|
||||
|
||||
// Optional description for the data source (does not exist yet)
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceConnectionList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
|
||||
Items []DataSourceConnection `json:"items"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type HealthCheckResult struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
|
|
@ -0,0 +1,173 @@
|
|||
package v0alpha1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
openapi "k8s.io/kube-openapi/pkg/common"
|
||||
spec "k8s.io/kube-openapi/pkg/validation/spec"
|
||||
|
||||
common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
|
||||
)
|
||||
|
||||
// UnstructuredSpec allows any property to be saved into the spec
|
||||
// Validation will happen from the dynamically loaded schemas for each datasource
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:openapi-gen=true
|
||||
type UnstructuredSpec common.Unstructured
|
||||
|
||||
func (u *UnstructuredSpec) GetString(key string) string {
|
||||
if u.Object == nil {
|
||||
return ""
|
||||
}
|
||||
v := u.Object[key]
|
||||
str, _ := v.(string)
|
||||
return str
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) Set(key string, val any) *UnstructuredSpec {
|
||||
if u.Object == nil {
|
||||
u.Object = make(map[string]any)
|
||||
}
|
||||
if val == nil || val == "" || val == false {
|
||||
delete(u.Object, key)
|
||||
} else {
|
||||
u.Object[key] = val
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) Title() string {
|
||||
return u.GetString("title")
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetTitle(v string) *UnstructuredSpec {
|
||||
return u.Set("title", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) URL() string {
|
||||
return u.GetString("url")
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetURL(v string) *UnstructuredSpec {
|
||||
return u.Set("url", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) Database() string {
|
||||
return u.GetString("database")
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetDatabase(v string) *UnstructuredSpec {
|
||||
return u.Set("database", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) Access() DsAccess {
|
||||
return DsAccess(u.GetString("access"))
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetAccess(v string) *UnstructuredSpec {
|
||||
return u.Set("access", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) User() string {
|
||||
return u.GetString("user")
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetUser(v string) *UnstructuredSpec {
|
||||
return u.Set("user", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) BasicAuth() bool {
|
||||
v, _, _ := unstructured.NestedBool(u.Object, "basicAuth")
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetBasicAuth(v bool) *UnstructuredSpec {
|
||||
return u.Set("basicAuth", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) BasicAuthUser() string {
|
||||
return u.GetString("basicAuthUser")
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetBasicAuthUser(v string) *UnstructuredSpec {
|
||||
return u.Set("basicAuthUser", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) WithCredentials() bool {
|
||||
v, _, _ := unstructured.NestedBool(u.Object, "withCredentials")
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetWithCredentials(v bool) *UnstructuredSpec {
|
||||
return u.Set("withCredentials", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) IsDefault() bool {
|
||||
v, _, _ := unstructured.NestedBool(u.Object, "isDefault")
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetIsDefault(v bool) *UnstructuredSpec {
|
||||
return u.Set("isDefault", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) ReadOnly() bool {
|
||||
v, _, _ := unstructured.NestedBool(u.Object, "readOnly")
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetReadOnly(v bool) *UnstructuredSpec {
|
||||
return u.Set("readOnly", v)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) JSONData() any {
|
||||
return u.Object["jsonData"]
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) SetJSONData(v any) *UnstructuredSpec {
|
||||
return u.Set("jsonData", v)
|
||||
}
|
||||
|
||||
// The OpenAPI spec uses the generated values from GenericDataSourceSpec, except that it:
|
||||
// 1. Allows additional properties at the root
|
||||
// 2. The jsonData field *may* be an raw value OR a map
|
||||
func (UnstructuredSpec) OpenAPIDefinition() openapi.OpenAPIDefinition {
|
||||
s := schema_pkg_apis_datasource_v0alpha1_GenericDataSourceSpec(func(path string) spec.Ref {
|
||||
return spec.MustCreateRef(path)
|
||||
})
|
||||
s.Schema.AdditionalProperties = &spec.SchemaOrBool{
|
||||
Allows: true,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// MarshalJSON ensures that the unstructured object produces proper
|
||||
// JSON when passed to Go's standard JSON library.
|
||||
func (u *UnstructuredSpec) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(u.Object)
|
||||
}
|
||||
|
||||
// UnmarshalJSON ensures that the unstructured object properly decodes
|
||||
// JSON when passed to Go's standard JSON library.
|
||||
func (u *UnstructuredSpec) UnmarshalJSON(b []byte) error {
|
||||
return json.Unmarshal(b, &u.Object)
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) DeepCopy() *UnstructuredSpec {
|
||||
if u == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UnstructuredSpec)
|
||||
*out = *u
|
||||
|
||||
tmp := common.Unstructured{Object: u.Object}
|
||||
copy := tmp.DeepCopy()
|
||||
out.Object = copy.Object
|
||||
return out
|
||||
}
|
||||
|
||||
func (u *UnstructuredSpec) DeepCopyInto(out *UnstructuredSpec) {
|
||||
clone := u.DeepCopy()
|
||||
*out = *clone
|
||||
}
|
|
@ -8,29 +8,38 @@
|
|||
package v0alpha1
|
||||
|
||||
import (
|
||||
commonv0alpha1 "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataSourceConnection) DeepCopyInto(out *DataSourceConnection) {
|
||||
func (in *DataSource) DeepCopyInto(out *DataSource) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
if in.Secure != nil {
|
||||
in, out := &in.Secure, &out.Secure
|
||||
*out = make(map[string]commonv0alpha1.InlineSecureValue, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConnection.
|
||||
func (in *DataSourceConnection) DeepCopy() *DataSourceConnection {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource.
|
||||
func (in *DataSource) DeepCopy() *DataSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DataSourceConnection)
|
||||
out := new(DataSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DataSourceConnection) DeepCopyObject() runtime.Object {
|
||||
func (in *DataSource) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
|
@ -38,13 +47,13 @@ func (in *DataSourceConnection) DeepCopyObject() runtime.Object {
|
|||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataSourceConnectionList) DeepCopyInto(out *DataSourceConnectionList) {
|
||||
func (in *DataSourceList) DeepCopyInto(out *DataSourceList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]DataSourceConnection, len(*in))
|
||||
*out = make([]DataSource, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
|
@ -52,18 +61,18 @@ func (in *DataSourceConnectionList) DeepCopyInto(out *DataSourceConnectionList)
|
|||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConnectionList.
|
||||
func (in *DataSourceConnectionList) DeepCopy() *DataSourceConnectionList {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceList.
|
||||
func (in *DataSourceList) DeepCopy() *DataSourceList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DataSourceConnectionList)
|
||||
out := new(DataSourceList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DataSourceConnectionList) DeepCopyObject() runtime.Object {
|
||||
func (in *DataSourceList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
|
|
|
@ -1,19 +0,0 @@
|
|||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulters functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
// All generated defaulters are covering - they call all nested defaulters.
|
||||
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
|
@ -14,13 +14,15 @@ import (
|
|||
|
||||
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
|
||||
return map[string]common.OpenAPIDefinition{
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSourceConnection": schema_pkg_apis_datasource_v0alpha1_DataSourceConnection(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSourceConnectionList": schema_pkg_apis_datasource_v0alpha1_DataSourceConnectionList(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.HealthCheckResult": schema_pkg_apis_datasource_v0alpha1_HealthCheckResult(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSource": schema_pkg_apis_datasource_v0alpha1_DataSource(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSourceList": schema_pkg_apis_datasource_v0alpha1_DataSourceList(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.GenericDataSourceSpec": schema_pkg_apis_datasource_v0alpha1_GenericDataSourceSpec(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.HealthCheckResult": schema_pkg_apis_datasource_v0alpha1_HealthCheckResult(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.UnstructuredSpec": UnstructuredSpec{}.OpenAPIDefinition(),
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_datasource_v0alpha1_DataSourceConnection(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
func schema_pkg_apis_datasource_v0alpha1_DataSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
|
@ -46,31 +48,37 @@ func schema_pkg_apis_datasource_v0alpha1_DataSourceConnection(ref common.Referen
|
|||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"title": {
|
||||
"spec": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The display name",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
Description: "DataSource configuration -- these properties are all visible to anyone able to query the data source from their browser",
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.UnstructuredSpec"),
|
||||
},
|
||||
},
|
||||
"description": {
|
||||
"secure": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Optional description for the data source (does not exist yet)",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
Description: "Secure values allows setting values that are never shown to users The returned properties are only the names of the configured values",
|
||||
Type: []string{"object"},
|
||||
AdditionalProperties: &spec.SchemaOrBool{
|
||||
Allows: true,
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.InlineSecureValue"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"title"},
|
||||
Required: []string{"metadata", "spec"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
"github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.InlineSecureValue", "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.UnstructuredSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_datasource_v0alpha1_DataSourceConnectionList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
func schema_pkg_apis_datasource_v0alpha1_DataSourceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
|
@ -103,18 +111,104 @@ func schema_pkg_apis_datasource_v0alpha1_DataSourceConnectionList(ref common.Ref
|
|||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSourceConnection"),
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSource"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"items"},
|
||||
Required: []string{"metadata", "items"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSourceConnection", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1.DataSource", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_datasource_v0alpha1_GenericDataSourceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The display name (previously saved as the \"name\" property)",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"access": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Possible enum values:\n - `\"direct\"` The frontend can connect directly to the remote URL This method is discouraged\n - `\"proxy\"` Connect to the remote datasource through the grafana backend",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
Enum: []interface{}{"direct", "proxy"},
|
||||
},
|
||||
},
|
||||
"readOnly": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"isDefault": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"url": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Server URL",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"user": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"database": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"basicAuth": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"basicAuthUser": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"withCredentials": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"jsonData": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Generic unstructured configuration settings",
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.Unstructured"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"title", "jsonData"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1.Unstructured"},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
API rule violation: names_match,github.com/grafana/grafana/pkg/apis/datasource/v0alpha1,UnstructuredSpec,Object
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/pkg/apis/datasource/v0alpha1,DataSourceList,ListMeta
|
|
@ -7,6 +7,40 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// Connection to a datasource instance
|
||||
// The connection name must be '{group}:{name}'
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceConnection struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitzero,omitempty"`
|
||||
|
||||
// The configured display name
|
||||
Title string `json:"title"`
|
||||
|
||||
// Reference to the kubernets datasource
|
||||
Datasource DataSourceConnectionRef `json:"datasource"`
|
||||
}
|
||||
|
||||
type DataSourceConnectionRef struct {
|
||||
Group string `json:"group"`
|
||||
Version string `json:"version"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// The valid connection name for a group + identifier
|
||||
func DataSourceConnectionName(group, name string) string {
|
||||
return group + ":" + name
|
||||
}
|
||||
|
||||
// List of all datasource instances across all datasource apiservers
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceConnectionList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitzero,omitempty"`
|
||||
|
||||
Items []DataSourceConnection `json:"items"`
|
||||
}
|
||||
|
||||
type DataSourceApiServerRegistry interface {
|
||||
// Get the group and preferred version for a plugin
|
||||
GetDatasourceGroupVersion(pluginId string) (schema.GroupVersion, error)
|
||||
|
@ -24,7 +58,7 @@ type DataSourceApiServerRegistry interface {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceApiServer struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
metav1.ObjectMeta `json:"metadata,omitzero,omitempty"`
|
||||
|
||||
// The display name
|
||||
Title string `json:"title"`
|
||||
|
@ -43,7 +77,7 @@ type DataSourceApiServer struct {
|
|||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type DataSourceApiServerList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
metav1.ListMeta `json:"metadata,omitzero,omitempty"`
|
||||
|
||||
Items []DataSourceApiServer `json:"items"`
|
||||
}
|
|
@ -1,6 +1,10 @@
|
|||
package v0alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
|
@ -13,6 +17,32 @@ const (
|
|||
APIVERSION = GROUP + "/" + VERSION
|
||||
)
|
||||
|
||||
var ConnectionResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
|
||||
"connections", "connection", "DataSourceConnection",
|
||||
func() runtime.Object { return &DataSourceConnection{} },
|
||||
func() runtime.Object { return &DataSourceConnectionList{} },
|
||||
utils.TableColumns{
|
||||
Definition: []metav1.TableColumnDefinition{
|
||||
{Name: "Name", Type: "string", Format: "name"},
|
||||
{Name: "Title", Type: "string", Format: "string", Description: "The datasource title"},
|
||||
{Name: "APIVersion", Type: "string", Format: "string", Description: "API Version"},
|
||||
{Name: "Created At", Type: "date"},
|
||||
},
|
||||
Reader: func(obj any) ([]interface{}, error) {
|
||||
m, ok := obj.(*DataSourceConnection)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected connection")
|
||||
}
|
||||
return []interface{}{
|
||||
m.Name,
|
||||
m.Title,
|
||||
m.APIVersion,
|
||||
m.CreationTimestamp.UTC().Format(time.RFC3339),
|
||||
}, nil
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
var DataSourceApiServerResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
|
||||
"datasourceapiservers", "datasourceapiserver", "DataSourceApiServer",
|
||||
func() runtime.Object { return &DataSourceApiServer{} },
|
||||
|
|
|
@ -75,6 +75,82 @@ func (in *DataSourceApiServerList) DeepCopyObject() runtime.Object {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataSourceConnection) DeepCopyInto(out *DataSourceConnection) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Datasource = in.Datasource
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConnection.
|
||||
func (in *DataSourceConnection) DeepCopy() *DataSourceConnection {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DataSourceConnection)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DataSourceConnection) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataSourceConnectionList) DeepCopyInto(out *DataSourceConnectionList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]DataSourceConnection, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConnectionList.
|
||||
func (in *DataSourceConnectionList) DeepCopy() *DataSourceConnectionList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DataSourceConnectionList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DataSourceConnectionList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataSourceConnectionRef) DeepCopyInto(out *DataSourceConnectionRef) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceConnectionRef.
|
||||
func (in *DataSourceConnectionRef) DeepCopy() *DataSourceConnectionRef {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DataSourceConnectionRef)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *QueryDataRequest) DeepCopyInto(out *QueryDataRequest) {
|
||||
*out = *in
|
||||
|
|
|
@ -14,12 +14,15 @@ import (
|
|||
|
||||
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
|
||||
return map[string]common.OpenAPIDefinition{
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceApiServer": schema_pkg_apis_query_v0alpha1_DataSourceApiServer(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceApiServerList": schema_pkg_apis_query_v0alpha1_DataSourceApiServerList(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryDataRequest": schema_pkg_apis_query_v0alpha1_QueryDataRequest(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryDataResponse": schema_pkg_apis_query_v0alpha1_QueryDataResponse(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryTypeDefinition": schema_pkg_apis_query_v0alpha1_QueryTypeDefinition(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryTypeDefinitionList": schema_pkg_apis_query_v0alpha1_QueryTypeDefinitionList(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceApiServer": schema_pkg_apis_query_v0alpha1_DataSourceApiServer(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceApiServerList": schema_pkg_apis_query_v0alpha1_DataSourceApiServerList(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnection": schema_pkg_apis_query_v0alpha1_DataSourceConnection(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnectionList": schema_pkg_apis_query_v0alpha1_DataSourceConnectionList(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnectionRef": schema_pkg_apis_query_v0alpha1_DataSourceConnectionRef(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryDataRequest": schema_pkg_apis_query_v0alpha1_QueryDataRequest(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryDataResponse": schema_pkg_apis_query_v0alpha1_QueryDataResponse(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryTypeDefinition": schema_pkg_apis_query_v0alpha1_QueryTypeDefinition(ref),
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.QueryTypeDefinitionList": schema_pkg_apis_query_v0alpha1_QueryTypeDefinitionList(ref),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -146,6 +149,140 @@ func schema_pkg_apis_query_v0alpha1_DataSourceApiServerList(ref common.Reference
|
|||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_query_v0alpha1_DataSourceConnection(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Connection to a datasource instance The connection name must be '{group}:{name}'",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The configured display name",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"datasource": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Reference to the kubernets datasource",
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnectionRef"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"title", "datasource"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnectionRef", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_query_v0alpha1_DataSourceConnectionList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "List of all datasource instances across all datasource apiservers",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnection"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"items"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/pkg/apis/query/v0alpha1.DataSourceConnection", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_query_v0alpha1_DataSourceConnectionRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"group": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"version": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"name": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"group", "version", "name"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_query_v0alpha1_QueryDataRequest(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
|
|
|
@ -1 +1,3 @@
|
|||
API rule violation: list_type_missing,github.com/grafana/grafana/pkg/apis/query/v0alpha1,DataSourceApiServer,AliasIDs
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/pkg/apis/query/v0alpha1,DataSourceApiServerList,ListMeta
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/pkg/apis/query/v0alpha1,DataSourceConnectionList,ListMeta
|
||||
|
|
|
@ -25,7 +25,7 @@ func (b *DataSourceAPIBuilder) GetAuthorizer() authorizer.Authorizer {
|
|||
uidScope := datasources.ScopeProvider.GetResourceScopeUID(attr.GetName())
|
||||
|
||||
// Must have query access to see a connection
|
||||
if attr.GetResource() == b.connectionResourceInfo.GroupResource().Resource {
|
||||
if attr.GetResource() == b.datasourceResourceInfo.GroupResource().Resource {
|
||||
scopes := []string{}
|
||||
if attr.GetName() != "" {
|
||||
scopes = []string{uidScope}
|
||||
|
|
|
@ -51,9 +51,9 @@ func (s *connectionAccess) ConvertToTable(ctx context.Context, object runtime.Ob
|
|||
}
|
||||
|
||||
func (s *connectionAccess) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
|
||||
return s.datasources.Get(ctx, name)
|
||||
return s.datasources.GetDataSource(ctx, name)
|
||||
}
|
||||
|
||||
func (s *connectionAccess) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
|
||||
return s.datasources.List(ctx)
|
||||
return s.datasources.ListDataSources(ctx)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,192 @@
|
|||
package datasource
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"iter"
|
||||
"maps"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/grafana/authlib/types"
|
||||
common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
datasourceV0 "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
gapiutil "github.com/grafana/grafana/pkg/services/apiserver/utils"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
)
|
||||
|
||||
type converter struct {
|
||||
mapper request.NamespaceMapper
|
||||
group string // the expected group
|
||||
plugin string // the expected pluginId
|
||||
alias []string // optional alias for the pluginId
|
||||
}
|
||||
|
||||
func (r *converter) asDataSource(ds *datasources.DataSource) (*datasourceV0.DataSource, error) {
|
||||
if ds.Type != r.plugin && !slices.Contains(r.alias, ds.Type) {
|
||||
return nil, fmt.Errorf("expected datasource type: %s %v // not: %s", r.plugin, r.alias, ds.Type)
|
||||
}
|
||||
|
||||
obj := &datasourceV0.DataSource{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ds.UID,
|
||||
Namespace: r.mapper(ds.OrgID),
|
||||
Generation: int64(ds.Version),
|
||||
},
|
||||
Spec: datasourceV0.UnstructuredSpec{},
|
||||
Secure: ToInlineSecureValues(ds.Type, ds.UID, maps.Keys(ds.SecureJsonData)),
|
||||
}
|
||||
obj.UID = gapiutil.CalculateClusterWideUID(obj)
|
||||
obj.Spec.SetTitle(ds.Name).
|
||||
SetAccess(string(ds.Access)).
|
||||
SetURL(ds.URL).
|
||||
SetDatabase(ds.Database).
|
||||
SetUser(ds.User).
|
||||
SetDatabase(ds.Database).
|
||||
SetBasicAuth(ds.BasicAuth).
|
||||
SetBasicAuthUser(ds.BasicAuthUser).
|
||||
SetWithCredentials(ds.WithCredentials).
|
||||
SetIsDefault(ds.IsDefault).
|
||||
SetReadOnly(ds.ReadOnly).
|
||||
SetJSONData(ds.JsonData)
|
||||
|
||||
if !ds.Created.IsZero() {
|
||||
obj.CreationTimestamp = metav1.NewTime(ds.Created)
|
||||
}
|
||||
if !ds.Updated.IsZero() {
|
||||
obj.ResourceVersion = fmt.Sprintf("%d", ds.Updated.UnixMilli())
|
||||
obj.Annotations = map[string]string{
|
||||
utils.AnnoKeyUpdatedTimestamp: ds.Updated.Format(time.RFC3339),
|
||||
}
|
||||
}
|
||||
|
||||
if ds.APIVersion != "" {
|
||||
obj.APIVersion = fmt.Sprintf("%s/%s", r.group, ds.APIVersion)
|
||||
}
|
||||
|
||||
if ds.ID > 0 {
|
||||
obj.Labels = map[string]string{
|
||||
utils.LabelKeyDeprecatedInternalID: strconv.FormatInt(ds.ID, 10),
|
||||
}
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// ToInlineSecureValues converts secure json into InlineSecureValues with reference names
|
||||
// The names are predictable and can be used while we implement dual writing for secrets
|
||||
func ToInlineSecureValues(dsType string, dsUID string, keys iter.Seq[string]) common.InlineSecureValues {
|
||||
values := make(common.InlineSecureValues)
|
||||
for k := range keys {
|
||||
h := sha256.New()
|
||||
h.Write([]byte(dsType)) // plugin id
|
||||
h.Write([]byte("|"))
|
||||
h.Write([]byte(dsUID)) // unique identifier
|
||||
h.Write([]byte("|"))
|
||||
h.Write([]byte(k)) // property name
|
||||
n := hex.EncodeToString(h.Sum(nil))
|
||||
values[k] = common.InlineSecureValue{
|
||||
Name: "ds-" + n[0:10], // predictable name for dual writing
|
||||
}
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nil
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
func (r *converter) toAddCommand(ds *datasourceV0.DataSource) (*datasources.AddDataSourceCommand, error) {
|
||||
if r.group != "" && ds.APIVersion != "" && !strings.HasPrefix(ds.APIVersion, r.group) {
|
||||
return nil, fmt.Errorf("expecting APIGroup: %s", r.group)
|
||||
}
|
||||
info, err := types.ParseNamespace(ds.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmd := &datasources.AddDataSourceCommand{
|
||||
Name: ds.Spec.Title(),
|
||||
UID: ds.Name,
|
||||
OrgID: info.OrgID,
|
||||
Type: r.plugin,
|
||||
|
||||
Access: datasources.DsAccess(ds.Spec.Access()),
|
||||
URL: ds.Spec.URL(),
|
||||
Database: ds.Spec.Database(),
|
||||
User: ds.Spec.User(),
|
||||
BasicAuth: ds.Spec.BasicAuth(),
|
||||
BasicAuthUser: ds.Spec.BasicAuthUser(),
|
||||
WithCredentials: ds.Spec.WithCredentials(),
|
||||
IsDefault: ds.Spec.IsDefault(),
|
||||
ReadOnly: ds.Spec.ReadOnly(),
|
||||
}
|
||||
|
||||
jsonData := ds.Spec.JSONData()
|
||||
if jsonData != nil {
|
||||
cmd.JsonData = simplejson.NewFromAny(jsonData)
|
||||
}
|
||||
|
||||
cmd.SecureJsonData = toSecureJsonData(ds)
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
func (r *converter) toUpdateCommand(ds *datasourceV0.DataSource) (*datasources.UpdateDataSourceCommand, error) {
|
||||
if r.group != "" && ds.APIVersion != "" && !strings.HasPrefix(ds.APIVersion, r.group) {
|
||||
return nil, fmt.Errorf("expecting APIGroup: %s", r.group)
|
||||
}
|
||||
info, err := types.ParseNamespace(ds.Namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmd := &datasources.UpdateDataSourceCommand{
|
||||
Name: ds.Spec.Title(),
|
||||
UID: ds.Name,
|
||||
OrgID: info.OrgID,
|
||||
Type: r.plugin,
|
||||
|
||||
Access: datasources.DsAccess(ds.Spec.Access()),
|
||||
URL: ds.Spec.URL(),
|
||||
Database: ds.Spec.Database(),
|
||||
User: ds.Spec.User(),
|
||||
BasicAuth: ds.Spec.BasicAuth(),
|
||||
BasicAuthUser: ds.Spec.BasicAuthUser(),
|
||||
WithCredentials: ds.Spec.WithCredentials(),
|
||||
IsDefault: ds.Spec.IsDefault(),
|
||||
ReadOnly: ds.Spec.ReadOnly(),
|
||||
|
||||
// The only field different than add
|
||||
Version: int(ds.Generation),
|
||||
}
|
||||
|
||||
jsonData := ds.Spec.JSONData()
|
||||
if jsonData != nil {
|
||||
cmd.JsonData = simplejson.NewFromAny(jsonData)
|
||||
}
|
||||
cmd.SecureJsonData = toSecureJsonData(ds)
|
||||
return cmd, err
|
||||
}
|
||||
|
||||
func toSecureJsonData(ds *datasourceV0.DataSource) map[string]string {
|
||||
if ds == nil || len(ds.Secure) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
secure := map[string]string{}
|
||||
for k, v := range ds.Secure {
|
||||
if v.Create != "" {
|
||||
secure[k] = v.Create.DangerouslyExposeAndConsumeValue()
|
||||
}
|
||||
if v.Remove {
|
||||
secure[k] = "" // Weirdly, this is the best we can do with the legacy API :(
|
||||
}
|
||||
}
|
||||
return secure
|
||||
}
|
|
@ -0,0 +1,153 @@
|
|||
package datasource
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/authlib/types"
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
)
|
||||
|
||||
func TestConverter(t *testing.T) {
|
||||
t.Run("resource to command", func(t *testing.T) {
|
||||
converter := converter{
|
||||
mapper: types.OrgNamespaceFormatter,
|
||||
plugin: "grafana-testdata-datasource",
|
||||
alias: []string{"testdata"},
|
||||
group: "testdata.grafana.datasource.app",
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
expectedErr string
|
||||
}{
|
||||
{"convert-resource-full", ""},
|
||||
{"convert-resource-empty", ""},
|
||||
{"convert-resource-invalid", "expecting APIGroup: testdata.grafana.datasource.app"},
|
||||
{"convert-resource-invalid2", "invalid stack id"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
obj := &v0alpha1.DataSource{}
|
||||
fpath := filepath.Join("testdata", tt.name+".json")
|
||||
raw, err := os.ReadFile(fpath) // nolint:gosec
|
||||
require.NoError(t, err)
|
||||
err = json.Unmarshal(raw, obj)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The add command
|
||||
fpath = filepath.Join("testdata", tt.name+"-to-cmd-add.json")
|
||||
add, err := converter.toAddCommand(obj)
|
||||
if tt.expectedErr != "" {
|
||||
require.ErrorContains(t, err, tt.expectedErr)
|
||||
require.Nil(t, add, "cmd should be nil when error exists")
|
||||
|
||||
update, err := converter.toUpdateCommand(obj)
|
||||
require.ErrorContains(t, err, tt.expectedErr)
|
||||
require.Nil(t, update, "cmd should be nil when error exists")
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
out, err := json.MarshalIndent(add, "", " ")
|
||||
require.NoError(t, err)
|
||||
raw, _ = os.ReadFile(fpath) // nolint:gosec
|
||||
if !assert.JSONEq(t, string(raw), string(out)) {
|
||||
_ = os.WriteFile(fpath, out, 0600)
|
||||
}
|
||||
|
||||
// The update command
|
||||
fpath = filepath.Join("testdata", tt.name+"-to-cmd-update.json")
|
||||
update, err := converter.toUpdateCommand(obj)
|
||||
require.NoError(t, err)
|
||||
|
||||
out, err = json.MarshalIndent(update, "", " ")
|
||||
require.NoError(t, err)
|
||||
raw, _ = os.ReadFile(fpath) // nolint:gosec
|
||||
if !assert.JSONEq(t, string(raw), string(out)) {
|
||||
_ = os.WriteFile(fpath, out, 0600)
|
||||
}
|
||||
|
||||
// Round trip the update (NOTE, not all properties will be included)
|
||||
ds := &datasources.DataSource{}
|
||||
err = json.Unmarshal(raw, ds) // the add command is also a DataSource
|
||||
require.NoError(t, err)
|
||||
|
||||
roundtrip, err := converter.asDataSource(ds)
|
||||
require.NoError(t, err)
|
||||
|
||||
fpath = filepath.Join("testdata", tt.name+"-to-cmd-update-roundtrip.json")
|
||||
out, err = json.MarshalIndent(roundtrip, "", " ")
|
||||
require.NoError(t, err)
|
||||
raw, _ = os.ReadFile(fpath) // nolint:gosec
|
||||
if !assert.JSONEq(t, string(raw), string(out)) {
|
||||
_ = os.WriteFile(fpath, out, 0600)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("dto to resource", func(t *testing.T) {
|
||||
converter := converter{
|
||||
mapper: types.OrgNamespaceFormatter,
|
||||
plugin: "grafana-testdata-datasource",
|
||||
alias: []string{"testdata"},
|
||||
group: "testdata.grafana.datasource.app",
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "convert-dto-testdata",
|
||||
},
|
||||
{
|
||||
name: "convert-dto-empty",
|
||||
},
|
||||
{
|
||||
name: "convert-dto-invalid",
|
||||
expectedErr: "expected datasource type: grafana-testdata-datasource [testdata]",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ds := &datasources.DataSource{}
|
||||
fpath := filepath.Join("testdata", tt.name+".json")
|
||||
raw, err := os.ReadFile(fpath) // nolint:gosec
|
||||
require.NoError(t, err)
|
||||
err = json.Unmarshal(raw, ds)
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, err := converter.asDataSource(ds)
|
||||
if tt.expectedErr != "" {
|
||||
require.ErrorContains(t, err, tt.expectedErr)
|
||||
require.Nil(t, obj, "object should be nil when error exists")
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify the result
|
||||
fpath = filepath.Join("testdata", tt.name+"-to-resource.json")
|
||||
if obj == nil {
|
||||
_, err := os.Stat(fpath)
|
||||
require.Error(t, err, "file should not exist")
|
||||
require.True(t, errors.Is(err, os.ErrNotExist))
|
||||
} else {
|
||||
out, err := json.MarshalIndent(obj, "", " ")
|
||||
require.NoError(t, err)
|
||||
raw, _ = os.ReadFile(fpath) // nolint:gosec
|
||||
if !assert.JSONEq(t, string(raw), string(out)) {
|
||||
_ = os.WriteFile(fpath, out, 0600)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
|
@ -0,0 +1,126 @@
|
|||
package datasource
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
_ rest.Scoper = (*legacyStorage)(nil)
|
||||
_ rest.SingularNameProvider = (*legacyStorage)(nil)
|
||||
_ rest.Getter = (*legacyStorage)(nil)
|
||||
_ rest.Lister = (*legacyStorage)(nil)
|
||||
_ rest.Storage = (*legacyStorage)(nil)
|
||||
_ rest.Creater = (*legacyStorage)(nil)
|
||||
_ rest.Updater = (*legacyStorage)(nil)
|
||||
_ rest.GracefulDeleter = (*legacyStorage)(nil)
|
||||
_ rest.CollectionDeleter = (*legacyStorage)(nil)
|
||||
)
|
||||
|
||||
type legacyStorage struct {
|
||||
datasources PluginDatasourceProvider
|
||||
resourceInfo *utils.ResourceInfo
|
||||
}
|
||||
|
||||
func (s *legacyStorage) New() runtime.Object {
|
||||
return s.resourceInfo.NewFunc()
|
||||
}
|
||||
|
||||
func (s *legacyStorage) Destroy() {}
|
||||
|
||||
func (s *legacyStorage) NamespaceScoped() bool {
|
||||
return true // namespace == org
|
||||
}
|
||||
|
||||
func (s *legacyStorage) GetSingularName() string {
|
||||
return s.resourceInfo.GetSingularName()
|
||||
}
|
||||
|
||||
func (s *legacyStorage) NewList() runtime.Object {
|
||||
return s.resourceInfo.NewListFunc()
|
||||
}
|
||||
|
||||
func (s *legacyStorage) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
|
||||
return s.resourceInfo.TableConverter().ConvertToTable(ctx, object, tableOptions)
|
||||
}
|
||||
|
||||
func (s *legacyStorage) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
|
||||
return s.datasources.ListDataSources(ctx)
|
||||
}
|
||||
|
||||
func (s *legacyStorage) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
|
||||
return s.datasources.GetDataSource(ctx, name)
|
||||
}
|
||||
|
||||
// Create implements rest.Creater.
|
||||
func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
|
||||
ds, ok := obj.(*v0alpha1.DataSource)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected a datasource object")
|
||||
}
|
||||
return s.datasources.CreateDataSource(ctx, ds)
|
||||
}
|
||||
|
||||
// Update implements rest.Updater.
|
||||
func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
|
||||
old, err := s.Get(ctx, name, &metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
obj, err := objInfo.UpdatedObject(ctx, old)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
ds, ok := obj.(*v0alpha1.DataSource)
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("expected a datasource object")
|
||||
}
|
||||
|
||||
oldDS, ok := obj.(*v0alpha1.DataSource)
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("expected a datasource object (old)")
|
||||
}
|
||||
|
||||
// Keep all the old secure values
|
||||
if len(oldDS.Secure) > 0 {
|
||||
for k, v := range oldDS.Secure {
|
||||
_, found := ds.Secure[k]
|
||||
if !found {
|
||||
ds.Secure[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ds, err = s.datasources.UpdateDataSource(ctx, ds)
|
||||
return ds, false, err
|
||||
}
|
||||
|
||||
// Delete implements rest.GracefulDeleter.
|
||||
func (s *legacyStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
|
||||
err := s.datasources.DeleteDataSource(ctx, name)
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// DeleteCollection implements rest.CollectionDeleter.
|
||||
func (s *legacyStorage) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *internalversion.ListOptions) (runtime.Object, error) {
|
||||
dss, err := s.datasources.ListDataSources(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, ds := range dss.Items {
|
||||
if err = s.datasources.DeleteDataSource(ctx, ds.Name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
package datasource
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
query "github.com/grafana/grafana/pkg/apis/query/v0alpha1"
|
||||
)
|
||||
|
||||
// Temporary noop storage that lets us map /connections/{name}/query
|
||||
type noopREST struct{}
|
||||
|
||||
var (
|
||||
_ rest.Storage = (*noopREST)(nil)
|
||||
_ rest.Scoper = (*noopREST)(nil)
|
||||
_ rest.Getter = (*noopREST)(nil)
|
||||
_ rest.SingularNameProvider = (*noopREST)(nil)
|
||||
)
|
||||
|
||||
func (r *noopREST) New() runtime.Object {
|
||||
return &query.QueryDataResponse{}
|
||||
}
|
||||
|
||||
func (r *noopREST) Destroy() {}
|
||||
|
||||
func (r *noopREST) NamespaceScoped() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *noopREST) GetSingularName() string {
|
||||
return "noop"
|
||||
}
|
||||
|
||||
func (r *noopREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
|
||||
return &metav1.Status{
|
||||
Status: metav1.StatusSuccess,
|
||||
Message: "noop",
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
package datasource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kube-openapi/pkg/spec3"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
|
||||
"github.com/grafana/grafana/pkg/registry/apis/query/queryschema"
|
||||
)
|
||||
|
||||
func (b *DataSourceAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.OpenAPI, error) {
|
||||
// The plugin description
|
||||
oas.Info.Description = b.pluginJSON.Info.Description
|
||||
|
||||
// The root api URL
|
||||
root := "/apis/" + b.datasourceResourceInfo.GroupVersion().String() + "/"
|
||||
|
||||
// Add queries to the request properties
|
||||
if err := queryschema.AddQueriesToOpenAPI(queryschema.OASQueryOptions{
|
||||
Swagger: oas,
|
||||
PluginJSON: &b.pluginJSON,
|
||||
QueryTypes: b.queryTypes,
|
||||
Root: root,
|
||||
QueryPath: "namespaces/{namespace}/datasources/{name}/query",
|
||||
QueryDescription: fmt.Sprintf("Query the %s datasources", b.pluginJSON.Name),
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Hide the resource routes -- explicit ones will be added if defined below
|
||||
prefix := root + "namespaces/{namespace}/datasources/{name}/resource"
|
||||
r := oas.Paths.Paths[prefix]
|
||||
if r != nil && r.Get != nil {
|
||||
r.Get.Description = "Get resources in the datasource plugin. NOTE, additional routes may exist, but are not exposed via OpenAPI"
|
||||
r.Delete = nil
|
||||
r.Head = nil
|
||||
r.Patch = nil
|
||||
r.Post = nil
|
||||
r.Put = nil
|
||||
r.Options = nil
|
||||
}
|
||||
delete(oas.Paths.Paths, prefix+"/{path}")
|
||||
|
||||
// Set explicit apiVersion and kind on the datasource
|
||||
ds, ok := oas.Components.Schemas["com.github.grafana.grafana.pkg.apis.datasource.v0alpha1.DataSource"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing DS type")
|
||||
}
|
||||
ds.Properties["apiVersion"] = *spec.StringProperty().WithEnum(b.GetGroupVersion().String())
|
||||
ds.Properties["kind"] = *spec.StringProperty().WithEnum("DataSource")
|
||||
|
||||
// Mark connections as deprecated
|
||||
delete(oas.Paths.Paths, root+"namespaces/{namespace}/connections/{name}")
|
||||
query := oas.Paths.Paths[root+"namespaces/{namespace}/connections/{name}/query"]
|
||||
for query == nil || query.Post == nil {
|
||||
return nil, fmt.Errorf("missing temporary connection path")
|
||||
}
|
||||
query.Post.Tags = []string{"Connections (deprecated)"}
|
||||
query.Post.Deprecated = true
|
||||
query.Post.RequestBody = &spec3.RequestBody{
|
||||
RequestBodyProps: spec3.RequestBodyProps{
|
||||
Content: map[string]*spec3.MediaType{
|
||||
"application/json": {
|
||||
MediaTypeProps: spec3.MediaTypeProps{
|
||||
Schema: spec.MapProperty(nil),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return oas, nil
|
||||
}
|
|
@ -5,27 +5,33 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
datasourceV0 "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
gapiutil "github.com/grafana/grafana/pkg/services/apiserver/utils"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/plugincontext"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
// This provides access to settings saved in the database.
|
||||
// Authorization checks will happen within each function, and the user in ctx will
|
||||
// limit which namespace/tenant/org we are talking to
|
||||
type PluginDatasourceProvider interface {
|
||||
// Get gets a specific datasource (that the user in context can see)
|
||||
Get(ctx context.Context, uid string) (*v0alpha1.DataSourceConnection, error)
|
||||
// Get a single data source (any type)
|
||||
GetDataSource(ctx context.Context, uid string) (*datasourceV0.DataSource, error)
|
||||
|
||||
// List lists all data sources the user in context can see
|
||||
List(ctx context.Context) (*v0alpha1.DataSourceConnectionList, error)
|
||||
// List all datasources (any type)
|
||||
ListDataSources(ctx context.Context) (*datasourceV0.DataSourceList, error)
|
||||
|
||||
// Create a data source
|
||||
CreateDataSource(ctx context.Context, ds *datasourceV0.DataSource) (*datasourceV0.DataSource, error)
|
||||
|
||||
// Update a data source
|
||||
UpdateDataSource(ctx context.Context, ds *datasourceV0.DataSource) (*datasourceV0.DataSource, error)
|
||||
|
||||
// Delete a data source (any type)
|
||||
DeleteDataSource(ctx context.Context, uid string) error
|
||||
|
||||
// Return settings (decrypted!) for a specific plugin
|
||||
// This will require "query" permission for the user in context
|
||||
|
@ -44,11 +50,16 @@ type PluginContextWrapper interface {
|
|||
func ProvideDefaultPluginConfigs(
|
||||
dsService datasources.DataSourceService,
|
||||
dsCache datasources.CacheService,
|
||||
contextProvider *plugincontext.Provider) ScopedPluginDatasourceProvider {
|
||||
contextProvider *plugincontext.Provider,
|
||||
cfg *setting.Cfg,
|
||||
) ScopedPluginDatasourceProvider {
|
||||
return &cachingDatasourceProvider{
|
||||
dsService: dsService,
|
||||
dsCache: dsCache,
|
||||
contextProvider: contextProvider,
|
||||
converter: &converter{
|
||||
mapper: request.GetNamespaceMapper(cfg),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -56,14 +67,22 @@ type cachingDatasourceProvider struct {
|
|||
dsService datasources.DataSourceService
|
||||
dsCache datasources.CacheService
|
||||
contextProvider *plugincontext.Provider
|
||||
converter *converter
|
||||
}
|
||||
|
||||
func (q *cachingDatasourceProvider) GetDatasourceProvider(pluginJson plugins.JSONData) PluginDatasourceProvider {
|
||||
group, _ := plugins.GetDatasourceGroupNameFromPluginID(pluginJson.ID)
|
||||
return &scopedDatasourceProvider{
|
||||
plugin: pluginJson,
|
||||
dsService: q.dsService,
|
||||
dsCache: q.dsCache,
|
||||
contextProvider: q.contextProvider,
|
||||
converter: &converter{
|
||||
mapper: q.converter.mapper,
|
||||
plugin: pluginJson.ID,
|
||||
alias: pluginJson.AliasIDs,
|
||||
group: group,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -72,6 +91,7 @@ type scopedDatasourceProvider struct {
|
|||
dsService datasources.DataSourceService
|
||||
dsCache datasources.CacheService
|
||||
contextProvider *plugincontext.Provider
|
||||
converter *converter
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -79,11 +99,62 @@ var (
|
|||
_ ScopedPluginDatasourceProvider = (*cachingDatasourceProvider)(nil)
|
||||
)
|
||||
|
||||
func (q *scopedDatasourceProvider) Get(ctx context.Context, uid string) (*v0alpha1.DataSourceConnection, error) {
|
||||
info, err := request.NamespaceInfoFrom(ctx, true)
|
||||
func (q *scopedDatasourceProvider) GetInstanceSettings(ctx context.Context, uid string) (*backend.DataSourceInstanceSettings, error) {
|
||||
if q.contextProvider == nil {
|
||||
return nil, fmt.Errorf("missing contextProvider")
|
||||
}
|
||||
return q.contextProvider.GetDataSourceInstanceSettings(ctx, uid)
|
||||
}
|
||||
|
||||
// CreateDataSource implements PluginDatasourceProvider.
|
||||
func (q *scopedDatasourceProvider) CreateDataSource(ctx context.Context, ds *datasourceV0.DataSource) (*datasourceV0.DataSource, error) {
|
||||
cmd, err := q.converter.toAddCommand(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := q.dsService.AddDataSource(ctx, cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.converter.asDataSource(out)
|
||||
}
|
||||
|
||||
// UpdateDataSource implements PluginDatasourceProvider.
|
||||
func (q *scopedDatasourceProvider) UpdateDataSource(ctx context.Context, ds *datasourceV0.DataSource) (*datasourceV0.DataSource, error) {
|
||||
cmd, err := q.converter.toUpdateCommand(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out, err := q.dsService.UpdateDataSource(ctx, cmd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q.converter.asDataSource(out)
|
||||
}
|
||||
|
||||
// Delete implements PluginDatasourceProvider.
|
||||
func (q *scopedDatasourceProvider) DeleteDataSource(ctx context.Context, uid string) error {
|
||||
user, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ds, err := q.dsCache.GetDatasourceByUID(ctx, uid, user, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ds == nil {
|
||||
return fmt.Errorf("not found")
|
||||
}
|
||||
return q.dsService.DeleteDataSource(ctx, &datasources.DeleteDataSourceCommand{
|
||||
ID: ds.ID,
|
||||
UID: ds.UID,
|
||||
OrgID: ds.OrgID,
|
||||
Name: ds.Name,
|
||||
})
|
||||
}
|
||||
|
||||
// GetDataSource implements PluginDatasourceProvider.
|
||||
func (q *scopedDatasourceProvider) GetDataSource(ctx context.Context, uid string) (*datasourceV0.DataSource, error) {
|
||||
user, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -92,10 +163,11 @@ func (q *scopedDatasourceProvider) Get(ctx context.Context, uid string) (*v0alph
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return asConnection(ds, info.Value)
|
||||
return q.converter.asDataSource(ds)
|
||||
}
|
||||
|
||||
func (q *scopedDatasourceProvider) List(ctx context.Context) (*v0alpha1.DataSourceConnectionList, error) {
|
||||
// ListDataSource implements PluginDatasourceProvider.
|
||||
func (q *scopedDatasourceProvider) ListDataSources(ctx context.Context) (*datasourceV0.DataSourceList, error) {
|
||||
info, err := request.NamespaceInfoFrom(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -109,37 +181,12 @@ func (q *scopedDatasourceProvider) List(ctx context.Context) (*v0alpha1.DataSour
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &v0alpha1.DataSourceConnectionList{
|
||||
Items: []v0alpha1.DataSourceConnection{},
|
||||
result := &datasourceV0.DataSourceList{
|
||||
Items: []datasourceV0.DataSource{},
|
||||
}
|
||||
for _, ds := range dss {
|
||||
v, _ := asConnection(ds, info.Value)
|
||||
v, _ := q.converter.asDataSource(ds)
|
||||
result.Items = append(result.Items, *v)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (q *scopedDatasourceProvider) GetInstanceSettings(ctx context.Context, uid string) (*backend.DataSourceInstanceSettings, error) {
|
||||
if q.contextProvider == nil {
|
||||
return nil, fmt.Errorf("missing contextProvider")
|
||||
}
|
||||
return q.contextProvider.GetDataSourceInstanceSettings(ctx, uid)
|
||||
}
|
||||
|
||||
func asConnection(ds *datasources.DataSource, ns string) (*v0alpha1.DataSourceConnection, error) {
|
||||
v := &v0alpha1.DataSourceConnection{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ds.UID,
|
||||
Namespace: ns,
|
||||
CreationTimestamp: metav1.NewTime(ds.Created),
|
||||
ResourceVersion: fmt.Sprintf("%d", ds.Updated.UnixMilli()),
|
||||
},
|
||||
Title: ds.Name,
|
||||
}
|
||||
v.UID = gapiutil.CalculateClusterWideUID(v) // indicates if the value changed on the server
|
||||
meta, err := utils.MetaAccessor(v)
|
||||
if err != nil {
|
||||
meta.SetUpdatedTimestamp(&ds.Updated)
|
||||
}
|
||||
return v, err
|
||||
}
|
||||
|
|
|
@ -4,13 +4,10 @@ import (
|
|||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type QuerierFactoryFunc func(ctx context.Context, ri utils.ResourceInfo, pj plugins.JSONData) (Querier, error)
|
||||
|
@ -48,10 +45,6 @@ type Querier interface {
|
|||
Health(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error)
|
||||
// Resource gets a resource plugin.
|
||||
Resource(ctx context.Context, req *backend.CallResourceRequest, sender backend.CallResourceResponseSender) error
|
||||
// Datasource gets all data source plugins (with elevated permissions).
|
||||
Datasource(ctx context.Context, name string) (*v0alpha1.DataSourceConnection, error)
|
||||
// Datasources lists all data sources (with elevated permissions).
|
||||
Datasources(ctx context.Context) (*v0alpha1.DataSourceConnectionList, error)
|
||||
}
|
||||
|
||||
type DefaultQuerier struct {
|
||||
|
@ -101,47 +94,3 @@ func (q *DefaultQuerier) Health(ctx context.Context, query *backend.CheckHealthR
|
|||
}
|
||||
return q.pluginClient.CheckHealth(ctx, query)
|
||||
}
|
||||
|
||||
func (q *DefaultQuerier) Datasource(ctx context.Context, name string) (*v0alpha1.DataSourceConnection, error) {
|
||||
info, err := request.NamespaceInfoFrom(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds, err := q.dsCache.GetDatasourceByUID(ctx, name, user, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return asConnection(ds, info.Value)
|
||||
}
|
||||
|
||||
func (q *DefaultQuerier) Datasources(ctx context.Context) (*v0alpha1.DataSourceConnectionList, error) {
|
||||
info, err := request.NamespaceInfoFrom(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ds, err := q.dsService.GetDataSourcesByType(ctx, &datasources.GetDataSourcesByTypeQuery{
|
||||
OrgID: info.OrgID,
|
||||
Type: q.pluginJSON.ID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return asConnectionList(q.connectionResourceInfo.TypeMeta(), ds, info.Value)
|
||||
}
|
||||
|
||||
func asConnectionList(typeMeta metav1.TypeMeta, dss []*datasources.DataSource, ns string) (*v0alpha1.DataSourceConnectionList, error) {
|
||||
result := &v0alpha1.DataSourceConnectionList{
|
||||
Items: []v0alpha1.DataSourceConnection{},
|
||||
}
|
||||
for _, ds := range dss {
|
||||
v, _ := asConnection(ds, ns)
|
||||
result.Items = append(result.Items, *v)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
@ -15,13 +16,13 @@ import (
|
|||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
openapi "k8s.io/kube-openapi/pkg/common"
|
||||
"k8s.io/kube-openapi/pkg/spec3"
|
||||
"k8s.io/utils/strings/slices"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
datasource "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
query "github.com/grafana/grafana/pkg/apis/query/v0alpha1"
|
||||
datasourceV0 "github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
queryV0 "github.com/grafana/grafana/pkg/apis/query/v0alpha1"
|
||||
grafanaregistry "github.com/grafana/grafana/pkg/apiserver/registry/generic"
|
||||
"github.com/grafana/grafana/pkg/configprovider"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
|
@ -35,19 +36,22 @@ import (
|
|||
"github.com/grafana/grafana/pkg/tsdb/grafana-testdata-datasource/kinds"
|
||||
)
|
||||
|
||||
var _ builder.APIGroupBuilder = (*DataSourceAPIBuilder)(nil)
|
||||
var (
|
||||
_ builder.APIGroupBuilder = (*DataSourceAPIBuilder)(nil)
|
||||
)
|
||||
|
||||
// DataSourceAPIBuilder is used just so wire has something unique to return
|
||||
type DataSourceAPIBuilder struct {
|
||||
connectionResourceInfo utils.ResourceInfo
|
||||
datasourceResourceInfo utils.ResourceInfo
|
||||
|
||||
pluginJSON plugins.JSONData
|
||||
client PluginClient // will only ever be called with the same pluginid!
|
||||
datasources PluginDatasourceProvider
|
||||
contextProvider PluginContextWrapper
|
||||
accessControl accesscontrol.AccessControl
|
||||
queryTypes *query.QueryTypeDefinitionList
|
||||
log log.Logger
|
||||
pluginJSON plugins.JSONData
|
||||
client PluginClient // will only ever be called with the same plugin id!
|
||||
datasources PluginDatasourceProvider
|
||||
contextProvider PluginContextWrapper
|
||||
accessControl accesscontrol.AccessControl
|
||||
queryTypes *queryV0.QueryTypeDefinitionList
|
||||
log log.Logger
|
||||
configCrudUseNewApis bool
|
||||
}
|
||||
|
||||
func RegisterAPIService(
|
||||
|
@ -110,10 +114,12 @@ func RegisterAPIService(
|
|||
contextProvider,
|
||||
accessControl,
|
||||
features.IsEnabledGlobally(featuremgmt.FlagDatasourceQueryTypes),
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
apiRegistrar.RegisterAPI(builder)
|
||||
}
|
||||
return builder, nil // only used for wire
|
||||
|
@ -135,30 +141,32 @@ func NewDataSourceAPIBuilder(
|
|||
contextProvider PluginContextWrapper,
|
||||
accessControl accesscontrol.AccessControl,
|
||||
loadQueryTypes bool,
|
||||
configCrudUseNewApis bool,
|
||||
) (*DataSourceAPIBuilder, error) {
|
||||
ri, err := resourceFromPluginID(plugin.ID)
|
||||
group, err := plugins.GetDatasourceGroupNameFromPluginID(plugin.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
builder := &DataSourceAPIBuilder{
|
||||
connectionResourceInfo: ri,
|
||||
datasourceResourceInfo: datasourceV0.DataSourceResourceInfo.WithGroupAndShortName(group, plugin.ID),
|
||||
pluginJSON: plugin,
|
||||
client: client,
|
||||
datasources: datasources,
|
||||
contextProvider: contextProvider,
|
||||
accessControl: accessControl,
|
||||
log: log.New("grafana-apiserver.datasource"),
|
||||
configCrudUseNewApis: configCrudUseNewApis,
|
||||
}
|
||||
if loadQueryTypes {
|
||||
// In the future, this will somehow come from the plugin
|
||||
builder.queryTypes, err = getHardcodedQueryTypes(ri.GroupResource().Group)
|
||||
builder.queryTypes, err = getHardcodedQueryTypes(group)
|
||||
}
|
||||
return builder, err
|
||||
}
|
||||
|
||||
// TODO -- somehow get the list from the plugin -- not hardcoded
|
||||
func getHardcodedQueryTypes(group string) (*query.QueryTypeDefinitionList, error) {
|
||||
func getHardcodedQueryTypes(group string) (*queryV0.QueryTypeDefinitionList, error) {
|
||||
var err error
|
||||
var raw json.RawMessage
|
||||
switch group {
|
||||
|
@ -171,7 +179,7 @@ func getHardcodedQueryTypes(group string) (*query.QueryTypeDefinitionList, error
|
|||
return nil, err
|
||||
}
|
||||
if raw != nil {
|
||||
types := &query.QueryTypeDefinitionList{}
|
||||
types := &queryV0.QueryTypeDefinitionList{}
|
||||
err = json.Unmarshal(raw, types)
|
||||
return types, err
|
||||
}
|
||||
|
@ -179,26 +187,27 @@ func getHardcodedQueryTypes(group string) (*query.QueryTypeDefinitionList, error
|
|||
}
|
||||
|
||||
func (b *DataSourceAPIBuilder) GetGroupVersion() schema.GroupVersion {
|
||||
return b.connectionResourceInfo.GroupVersion()
|
||||
return b.datasourceResourceInfo.GroupVersion()
|
||||
}
|
||||
|
||||
func addKnownTypes(scheme *runtime.Scheme, gv schema.GroupVersion) {
|
||||
scheme.AddKnownTypes(gv,
|
||||
&datasource.DataSourceConnection{},
|
||||
&datasource.DataSourceConnectionList{},
|
||||
&datasource.HealthCheckResult{},
|
||||
&datasourceV0.DataSource{},
|
||||
&datasourceV0.DataSourceList{},
|
||||
&datasourceV0.HealthCheckResult{},
|
||||
&unstructured.Unstructured{},
|
||||
|
||||
// Query handler
|
||||
&query.QueryDataRequest{},
|
||||
&query.QueryDataResponse{},
|
||||
&query.QueryTypeDefinition{},
|
||||
&query.QueryTypeDefinitionList{},
|
||||
&queryV0.QueryDataRequest{},
|
||||
&queryV0.QueryDataResponse{},
|
||||
&queryV0.QueryTypeDefinition{},
|
||||
&queryV0.QueryTypeDefinitionList{},
|
||||
&metav1.Status{},
|
||||
)
|
||||
}
|
||||
|
||||
func (b *DataSourceAPIBuilder) InstallSchema(scheme *runtime.Scheme) error {
|
||||
gv := b.connectionResourceInfo.GroupVersion()
|
||||
gv := b.datasourceResourceInfo.GroupVersion()
|
||||
addKnownTypes(scheme, gv)
|
||||
|
||||
// Link this version to the internal representation.
|
||||
|
@ -221,32 +230,45 @@ func (b *DataSourceAPIBuilder) AllowedV0Alpha1Resources() []string {
|
|||
return []string{builder.AllResourcesAllowed}
|
||||
}
|
||||
|
||||
func resourceFromPluginID(pluginID string) (utils.ResourceInfo, error) {
|
||||
group, err := plugins.GetDatasourceGroupNameFromPluginID(pluginID)
|
||||
if err != nil {
|
||||
return utils.ResourceInfo{}, err
|
||||
}
|
||||
return datasource.GenericConnectionResourceInfo.WithGroupAndShortName(group, pluginID+"-connection"), nil
|
||||
}
|
||||
|
||||
func (b *DataSourceAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, _ builder.APIGroupOptions) error {
|
||||
func (b *DataSourceAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions) error {
|
||||
storage := map[string]rest.Storage{}
|
||||
|
||||
conn := b.connectionResourceInfo
|
||||
storage[conn.StoragePath()] = &connectionAccess{
|
||||
datasources: b.datasources,
|
||||
resourceInfo: conn,
|
||||
tableConverter: conn.TableConverter(),
|
||||
}
|
||||
storage[conn.StoragePath("query")] = &subQueryREST{builder: b}
|
||||
storage[conn.StoragePath("health")] = &subHealthREST{builder: b}
|
||||
// Register the raw datasource connection
|
||||
ds := b.datasourceResourceInfo
|
||||
storage[ds.StoragePath("query")] = &subQueryREST{builder: b}
|
||||
storage[ds.StoragePath("health")] = &subHealthREST{builder: b}
|
||||
storage[ds.StoragePath("resource")] = &subResourceREST{builder: b}
|
||||
|
||||
// TODO! only setup this endpoint if it is implemented
|
||||
storage[conn.StoragePath("resource")] = &subResourceREST{builder: b}
|
||||
// FIXME: temporarily register both "datasources" and "connections" query paths
|
||||
// This lets us deploy both datasources/{uid}/query and connections/{uid}/query
|
||||
// while we transition requests to the new path
|
||||
storage["connections"] = &noopREST{} // hidden from openapi
|
||||
storage["connections/query"] = storage[ds.StoragePath("query")] // deprecated in openapi
|
||||
|
||||
if b.configCrudUseNewApis {
|
||||
legacyStore := &legacyStorage{
|
||||
datasources: b.datasources,
|
||||
resourceInfo: &ds,
|
||||
}
|
||||
unified, err := grafanaregistry.NewRegistryStore(opts.Scheme, ds, opts.OptsGetter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
storage[ds.StoragePath()], err = opts.DualWriteBuilder(ds.GroupResource(), legacyStore, unified)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
storage[ds.StoragePath()] = &connectionAccess{
|
||||
datasources: b.datasources,
|
||||
resourceInfo: ds,
|
||||
tableConverter: ds.TableConverter(),
|
||||
}
|
||||
}
|
||||
|
||||
// Frontend proxy
|
||||
if len(b.pluginJSON.Routes) > 0 {
|
||||
storage[conn.StoragePath("proxy")] = &subProxyREST{pluginJSON: b.pluginJSON}
|
||||
storage[ds.StoragePath("proxy")] = &subProxyREST{pluginJSON: b.pluginJSON}
|
||||
}
|
||||
|
||||
// Register hardcoded query schemas
|
||||
|
@ -257,7 +279,7 @@ func (b *DataSourceAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver
|
|||
|
||||
registerQueryConvert(b.client, b.contextProvider, storage)
|
||||
|
||||
apiGroupInfo.VersionedResourcesStorageMap[conn.GroupVersion().Version] = storage
|
||||
apiGroupInfo.VersionedResourcesStorageMap[ds.GroupVersion().Version] = storage
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -271,35 +293,12 @@ func (b *DataSourceAPIBuilder) getPluginContext(ctx context.Context, uid string)
|
|||
|
||||
func (b *DataSourceAPIBuilder) GetOpenAPIDefinitions() openapi.GetOpenAPIDefinitions {
|
||||
return func(ref openapi.ReferenceCallback) map[string]openapi.OpenAPIDefinition {
|
||||
defs := query.GetOpenAPIDefinitions(ref) // required when running standalone
|
||||
for k, v := range datasource.GetOpenAPIDefinitions(ref) {
|
||||
defs[k] = v
|
||||
}
|
||||
defs := queryV0.GetOpenAPIDefinitions(ref) // required when running standalone
|
||||
maps.Copy(defs, datasourceV0.GetOpenAPIDefinitions(ref))
|
||||
return defs
|
||||
}
|
||||
}
|
||||
|
||||
func (b *DataSourceAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenAPI) (*spec3.OpenAPI, error) {
|
||||
// The plugin description
|
||||
oas.Info.Description = b.pluginJSON.Info.Description
|
||||
|
||||
// The root api URL
|
||||
root := "/apis/" + b.connectionResourceInfo.GroupVersion().String() + "/"
|
||||
|
||||
// Add queries to the request properties
|
||||
// Add queries to the request properties
|
||||
err := queryschema.AddQueriesToOpenAPI(queryschema.OASQueryOptions{
|
||||
Swagger: oas,
|
||||
PluginJSON: &b.pluginJSON,
|
||||
QueryTypes: b.queryTypes,
|
||||
Root: root,
|
||||
QueryPath: "namespaces/{namespace}/connections/{name}/query",
|
||||
QueryDescription: fmt.Sprintf("Query the %s datasources", b.pluginJSON.Name),
|
||||
})
|
||||
|
||||
return oas, err
|
||||
}
|
||||
|
||||
func getCorePlugins(cfg *setting.Cfg) ([]plugins.JSONData, error) {
|
||||
coreDataSourcesPath := filepath.Join(cfg.StaticRootPath, "app", "plugins", "datasource")
|
||||
coreDataSourcesSrc := sources.NewLocalSource(
|
||||
|
|
|
@ -6,19 +6,17 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
data "github.com/grafana/grafana-plugin-sdk-go/experimental/apis/data/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/errutil"
|
||||
query "github.com/grafana/grafana/pkg/apis/query/v0alpha1"
|
||||
query_headers "github.com/grafana/grafana/pkg/registry/apis/query"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
"github.com/grafana/grafana/pkg/web"
|
||||
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
type subQueryREST struct {
|
||||
|
@ -29,6 +27,7 @@ var (
|
|||
_ rest.Storage = (*subQueryREST)(nil)
|
||||
_ rest.Connecter = (*subQueryREST)(nil)
|
||||
_ rest.StorageMetadata = (*subQueryREST)(nil)
|
||||
_ rest.Scoper = (*subQueryREST)(nil)
|
||||
)
|
||||
|
||||
func (r *subQueryREST) New() runtime.Object {
|
||||
|
@ -38,6 +37,10 @@ func (r *subQueryREST) New() runtime.Object {
|
|||
|
||||
func (r *subQueryREST) Destroy() {}
|
||||
|
||||
func (r *subQueryREST) NamespaceScoped() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *subQueryREST) ProducesMIMETypes(verb string) []string {
|
||||
return []string{"application/json"} // and parquet!
|
||||
}
|
||||
|
@ -59,15 +62,8 @@ func (r *subQueryREST) Connect(ctx context.Context, name string, opts runtime.Ob
|
|||
|
||||
if err != nil {
|
||||
if errors.Is(err, datasources.ErrDataSourceNotFound) {
|
||||
return nil, k8serrors.NewNotFound(
|
||||
schema.GroupResource{
|
||||
Group: r.builder.connectionResourceInfo.GroupResource().Group,
|
||||
Resource: r.builder.connectionResourceInfo.GroupResource().Resource,
|
||||
},
|
||||
name,
|
||||
)
|
||||
return nil, r.builder.datasourceResourceInfo.NewNotFound(name)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
|
|
@ -8,14 +8,16 @@ import (
|
|||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
"github.com/stretchr/testify/require"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/apis/datasource/v0alpha1"
|
||||
queryV0 "github.com/grafana/grafana/pkg/apis/query/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
)
|
||||
|
||||
func TestSubQueryConnect(t *testing.T) {
|
||||
|
@ -115,16 +117,43 @@ func (m mockResponder) Object(statusCode int, obj runtime.Object) {
|
|||
func (m mockResponder) Error(err error) {
|
||||
}
|
||||
|
||||
var _ PluginDatasourceProvider = (*mockDatasources)(nil)
|
||||
|
||||
type mockDatasources struct {
|
||||
}
|
||||
|
||||
// CreateDataSource implements PluginDatasourceProvider.
|
||||
func (m mockDatasources) CreateDataSource(ctx context.Context, ds *v0alpha1.DataSource) (*v0alpha1.DataSource, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// UpdateDataSource implements PluginDatasourceProvider.
|
||||
func (m mockDatasources) UpdateDataSource(ctx context.Context, ds *v0alpha1.DataSource) (*v0alpha1.DataSource, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Delete implements PluginDatasourceProvider.
|
||||
func (m mockDatasources) DeleteDataSource(ctx context.Context, uid string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDataSource implements PluginDatasourceProvider.
|
||||
func (m mockDatasources) GetDataSource(ctx context.Context, uid string) (*v0alpha1.DataSource, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ListDataSource implements PluginDatasourceProvider.
|
||||
func (m mockDatasources) ListDataSources(ctx context.Context) (*v0alpha1.DataSourceList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get gets a specific datasource (that the user in context can see)
|
||||
func (m mockDatasources) Get(ctx context.Context, uid string) (*v0alpha1.DataSourceConnection, error) {
|
||||
func (m mockDatasources) GetConnection(ctx context.Context, uid string) (*queryV0.DataSourceConnection, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// List lists all data sources the user in context can see
|
||||
func (m mockDatasources) List(ctx context.Context) (*v0alpha1.DataSourceConnectionList, error) {
|
||||
func (m mockDatasources) ListConnections(ctx context.Context) (*queryV0.DataSourceConnectionList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -8,11 +8,11 @@ import (
|
|||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/plugins/httpresponsesender"
|
||||
)
|
||||
|
||||
|
|
|
@ -18,36 +18,36 @@ func TestResourceRequest(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
desc: "no resource path",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/connections/abc",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/datasources/abc",
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
desc: "root resource path",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/connections/abc/resource",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/datasources/abc/resource",
|
||||
expectedPath: "",
|
||||
expectedURL: "",
|
||||
},
|
||||
{
|
||||
desc: "root resource path",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/connections/abc/resource/",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/datasources/abc/resource/",
|
||||
expectedPath: "",
|
||||
expectedURL: "",
|
||||
},
|
||||
{
|
||||
desc: "resource sub path",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/connections/abc/resource/test",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/datasources/abc/resource/test",
|
||||
expectedPath: "test",
|
||||
expectedURL: "test",
|
||||
},
|
||||
{
|
||||
desc: "resource sub path with colon",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/connections/abc/resource/test-*,*:test-*/_mapping",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/datasources/abc/resource/test-*,*:test-*/_mapping",
|
||||
expectedPath: "test-*,*:test-*/_mapping",
|
||||
expectedURL: "./test-%2A,%2A:test-%2A/_mapping",
|
||||
},
|
||||
{
|
||||
desc: "resource sub path with query params",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/connections/abc/resource/test?k1=v1&k2=v2",
|
||||
url: "http://localhost:6443/apis/test.datasource.grafana.app/v0alpha1/namespaces/default/datasources/abc/resource/test?k1=v1&k2=v2",
|
||||
expectedPath: "test",
|
||||
expectedURL: "test?k1=v1&k2=v2",
|
||||
},
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"metadata": {
|
||||
"name": "unique-identifier",
|
||||
"namespace": "org-0",
|
||||
"uid": "YpaSG5GQAdxtLZtF6BqQWCeYXOhbVi5C4Cg4oILnJC0X",
|
||||
"generation": 8,
|
||||
"creationTimestamp": "2002-03-04T01:00:00Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "456"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"jsonData": null,
|
||||
"title": "Display name"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"id": 456,
|
||||
"version": 8,
|
||||
"name": "Display name",
|
||||
"uid": "unique-identifier",
|
||||
"type": "grafana-testdata-datasource",
|
||||
"created": "2002-03-04T01:00:00Z"
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"id": 456,
|
||||
"version": 8,
|
||||
"name": "Hello",
|
||||
"uid": "unique-identifier",
|
||||
"type": "not-valid-plugin",
|
||||
"created": "2002-03-04T01:00:00Z"
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
{
|
||||
"apiVersion": "testdata.grafana.datasource.app/v2alpha1",
|
||||
"metadata": {
|
||||
"name": "unique-identifier",
|
||||
"namespace": "org-0",
|
||||
"uid": "YpaSG5GQAdxtLZtF6BqQWCeYXOhbVi5C4Cg4oILnJC0X",
|
||||
"resourceVersion": "1083805200000",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2002-03-04T01:00:00Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "1234"
|
||||
},
|
||||
"annotations": {
|
||||
"grafana.app/updatedTimestamp": "2004-05-06T01:00:00Z"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"access": "proxy",
|
||||
"basicAuth": true,
|
||||
"basicAuthUser": "xxx",
|
||||
"database": "db",
|
||||
"isDefault": true,
|
||||
"jsonData": {
|
||||
"aaa": "bbb",
|
||||
"bbb": true,
|
||||
"ccc": 1.234
|
||||
},
|
||||
"readOnly": true,
|
||||
"title": "Hello",
|
||||
"url": "http://something/",
|
||||
"user": "A",
|
||||
"withCredentials": true
|
||||
},
|
||||
"secure": {
|
||||
"password": {
|
||||
"name": "ds-d5c1b093af"
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
{
|
||||
"id": 1234,
|
||||
"version": 2,
|
||||
"name": "Hello",
|
||||
"uid": "unique-identifier",
|
||||
"type": "grafana-testdata-datasource",
|
||||
"access": "proxy",
|
||||
"url": "http://something/",
|
||||
"user": "A",
|
||||
"database": "db",
|
||||
"basicAuth": true,
|
||||
"basicAuthUser": "xxx",
|
||||
"withCredentials": true,
|
||||
"isDefault": true,
|
||||
"jsonData": {
|
||||
"aaa": "bbb",
|
||||
"bbb": true,
|
||||
"ccc": 1.234
|
||||
},
|
||||
"secureJsonData": {
|
||||
"password": "XXXX"
|
||||
},
|
||||
"readOnly": true,
|
||||
"apiVersion": "v2alpha1",
|
||||
"created": "2002-03-04T01:00:00Z",
|
||||
"updated": "2004-05-06T01:00:00Z"
|
||||
}
|
15
pkg/registry/apis/datasource/testdata/convert-resource-empty-to-cmd-add.json
vendored
Normal file
15
pkg/registry/apis/datasource/testdata/convert-resource-empty-to-cmd-add.json
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
{
|
||||
"name": "Hello testdata",
|
||||
"type": "grafana-testdata-datasource",
|
||||
"access": "",
|
||||
"url": "",
|
||||
"user": "",
|
||||
"database": "",
|
||||
"basicAuth": false,
|
||||
"basicAuthUser": "",
|
||||
"withCredentials": false,
|
||||
"isDefault": false,
|
||||
"jsonData": null,
|
||||
"secureJsonData": null,
|
||||
"uid": "cejobd88i85j4d"
|
||||
}
|
11
pkg/registry/apis/datasource/testdata/convert-resource-empty-to-cmd-update-roundtrip.json
vendored
Normal file
11
pkg/registry/apis/datasource/testdata/convert-resource-empty-to-cmd-update-roundtrip.json
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
{
|
||||
"metadata": {
|
||||
"name": "cejobd88i85j4d",
|
||||
"namespace": "org-0",
|
||||
"uid": "boDNh7zU3nXj46rOXIJI7r44qaxjs8yy9I9dOj1MyBoX"
|
||||
},
|
||||
"spec": {
|
||||
"jsonData": null,
|
||||
"title": "Hello testdata"
|
||||
}
|
||||
}
|
16
pkg/registry/apis/datasource/testdata/convert-resource-empty-to-cmd-update.json
vendored
Normal file
16
pkg/registry/apis/datasource/testdata/convert-resource-empty-to-cmd-update.json
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"name": "Hello testdata",
|
||||
"type": "grafana-testdata-datasource",
|
||||
"access": "",
|
||||
"url": "",
|
||||
"user": "",
|
||||
"database": "",
|
||||
"basicAuth": false,
|
||||
"basicAuthUser": "",
|
||||
"withCredentials": false,
|
||||
"isDefault": false,
|
||||
"jsonData": null,
|
||||
"secureJsonData": null,
|
||||
"uid": "cejobd88i85j4d",
|
||||
"version": 0
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
{
|
||||
"metadata": {
|
||||
"name": "cejobd88i85j4d"
|
||||
},
|
||||
"spec": {
|
||||
"title": "Hello testdata"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
{
|
||||
"name": "Hello testdata",
|
||||
"type": "grafana-testdata-datasource",
|
||||
"access": "proxy",
|
||||
"url": "http://something/",
|
||||
"user": "",
|
||||
"database": "db",
|
||||
"basicAuth": true,
|
||||
"basicAuthUser": "xxx",
|
||||
"withCredentials": true,
|
||||
"isDefault": true,
|
||||
"jsonData": {
|
||||
"aaa": "bbb",
|
||||
"bbb": true,
|
||||
"ccc": 1.234
|
||||
},
|
||||
"secureJsonData": {
|
||||
"extra": "",
|
||||
"password": "XXXX"
|
||||
},
|
||||
"uid": "cejobd88i85j4d"
|
||||
}
|
31
pkg/registry/apis/datasource/testdata/convert-resource-full-to-cmd-update-roundtrip.json
vendored
Normal file
31
pkg/registry/apis/datasource/testdata/convert-resource-full-to-cmd-update-roundtrip.json
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
{
|
||||
"metadata": {
|
||||
"name": "cejobd88i85j4d",
|
||||
"namespace": "org-0",
|
||||
"uid": "boDNh7zU3nXj46rOXIJI7r44qaxjs8yy9I9dOj1MyBoX",
|
||||
"generation": 2
|
||||
},
|
||||
"spec": {
|
||||
"access": "proxy",
|
||||
"basicAuth": true,
|
||||
"basicAuthUser": "xxx",
|
||||
"database": "db",
|
||||
"isDefault": true,
|
||||
"jsonData": {
|
||||
"aaa": "bbb",
|
||||
"bbb": true,
|
||||
"ccc": 1.234
|
||||
},
|
||||
"title": "Hello testdata",
|
||||
"url": "http://something/",
|
||||
"withCredentials": true
|
||||
},
|
||||
"secure": {
|
||||
"extra": {
|
||||
"name": "ds-bb8b5d8b32"
|
||||
},
|
||||
"password": {
|
||||
"name": "ds-973a1eb29d"
|
||||
}
|
||||
}
|
||||
}
|
23
pkg/registry/apis/datasource/testdata/convert-resource-full-to-cmd-update.json
vendored
Normal file
23
pkg/registry/apis/datasource/testdata/convert-resource-full-to-cmd-update.json
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"name": "Hello testdata",
|
||||
"type": "grafana-testdata-datasource",
|
||||
"access": "proxy",
|
||||
"url": "http://something/",
|
||||
"user": "",
|
||||
"database": "db",
|
||||
"basicAuth": true,
|
||||
"basicAuthUser": "xxx",
|
||||
"withCredentials": true,
|
||||
"isDefault": true,
|
||||
"jsonData": {
|
||||
"aaa": "bbb",
|
||||
"bbb": true,
|
||||
"ccc": 1.234
|
||||
},
|
||||
"secureJsonData": {
|
||||
"extra": "",
|
||||
"password": "XXXX"
|
||||
},
|
||||
"uid": "cejobd88i85j4d",
|
||||
"version": 2
|
||||
}
|
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"metadata": {
|
||||
"name": "cejobd88i85j4d",
|
||||
"namespace": "default",
|
||||
"uid": "IGIUtEQS21DtLpBG2rSGfuDoUX8cwsGrtb5aXauYeA4X",
|
||||
"resourceVersion": "1745320815000",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2025-04-22T11:20:11Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "12345"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"title": "Hello testdata",
|
||||
"access": "proxy",
|
||||
"isDefault": true,
|
||||
"readOnly": true,
|
||||
"url": "http://something/",
|
||||
"database": "db",
|
||||
"basicAuth": true,
|
||||
"basicAuthUser": "xxx",
|
||||
"withCredentials": true,
|
||||
"jsonData": {
|
||||
"aaa": "bbb",
|
||||
"bbb": true,
|
||||
"ccc": 1.234
|
||||
}
|
||||
},
|
||||
"secure": {
|
||||
"password": { "create": "XXXX" },
|
||||
"extra": { "remove": true }
|
||||
}
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
{
|
||||
"apiVersion": "something/else",
|
||||
"metadata": {
|
||||
"name": "cejobd88i85j4d",
|
||||
"namespace": "default",
|
||||
"uid": "IGIUtEQS21DtLpBG2rSGfuDoUX8cwsGrtb5aXauYeA4X",
|
||||
"resourceVersion": "1745320815000",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2025-04-22T11:20:11Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "12345"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"title": "Hello testdata"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"metadata": {
|
||||
"name": "cejobd88i85j4d",
|
||||
"namespace": "stacks-invalid"
|
||||
},
|
||||
"spec": {
|
||||
"title": "Hello testdata"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,161 @@
|
|||
package query
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
queryV0 "github.com/grafana/grafana/pkg/apis/query/v0alpha1"
|
||||
gapiutil "github.com/grafana/grafana/pkg/services/apiserver/utils"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
)
|
||||
|
||||
var (
|
||||
_ rest.Scoper = (*connectionAccess)(nil)
|
||||
_ rest.SingularNameProvider = (*connectionAccess)(nil)
|
||||
_ rest.Getter = (*connectionAccess)(nil)
|
||||
_ rest.Lister = (*connectionAccess)(nil)
|
||||
_ rest.Storage = (*connectionAccess)(nil)
|
||||
)
|
||||
|
||||
// Get all datasource connections -- this will be backed by search or duplicated resource in unified storage
|
||||
type DataSourceConnectionProvider interface {
|
||||
// Get gets a specific datasource (that the user in context can see)
|
||||
// The name is {group}:{name}, see /pkg/apis/query/v0alpha1/connection.go#L34
|
||||
GetConnection(ctx context.Context, namespace string, name string) (*queryV0.DataSourceConnection, error)
|
||||
|
||||
// List lists all data sources the user in context can see
|
||||
ListConnections(ctx context.Context, namespace string) (*queryV0.DataSourceConnectionList, error)
|
||||
}
|
||||
|
||||
type connectionAccess struct {
|
||||
tableConverter rest.TableConvertor
|
||||
connections DataSourceConnectionProvider
|
||||
}
|
||||
|
||||
func (s *connectionAccess) New() runtime.Object {
|
||||
return queryV0.ConnectionResourceInfo.NewFunc()
|
||||
}
|
||||
|
||||
func (s *connectionAccess) Destroy() {}
|
||||
|
||||
func (s *connectionAccess) NamespaceScoped() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *connectionAccess) GetSingularName() string {
|
||||
return queryV0.ConnectionResourceInfo.GetSingularName()
|
||||
}
|
||||
|
||||
func (s *connectionAccess) ShortNames() []string {
|
||||
return queryV0.ConnectionResourceInfo.GetShortNames()
|
||||
}
|
||||
|
||||
func (s *connectionAccess) NewList() runtime.Object {
|
||||
return queryV0.ConnectionResourceInfo.NewListFunc()
|
||||
}
|
||||
|
||||
func (s *connectionAccess) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
|
||||
if s.tableConverter == nil {
|
||||
s.tableConverter = queryV0.ConnectionResourceInfo.TableConverter()
|
||||
}
|
||||
return s.tableConverter.ConvertToTable(ctx, object, tableOptions)
|
||||
}
|
||||
|
||||
func (s *connectionAccess) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
|
||||
return s.connections.GetConnection(ctx, request.NamespaceValue(ctx), name)
|
||||
}
|
||||
|
||||
func (s *connectionAccess) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
|
||||
return s.connections.ListConnections(ctx, request.NamespaceValue(ctx))
|
||||
}
|
||||
|
||||
type connectionsProvider struct {
|
||||
dsService datasources.DataSourceService
|
||||
registry queryV0.DataSourceApiServerRegistry
|
||||
}
|
||||
|
||||
var (
|
||||
_ DataSourceConnectionProvider = (*connectionsProvider)(nil)
|
||||
)
|
||||
|
||||
func (q *connectionsProvider) GetConnection(ctx context.Context, namespace string, name string) (*queryV0.DataSourceConnection, error) {
|
||||
info, err := authlib.ParseNamespace(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds, err := q.dsService.GetDataSource(ctx, &datasources.GetDataSourceQuery{
|
||||
UID: name,
|
||||
OrgID: info.OrgID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO... access control?
|
||||
return q.asConnection(ds, namespace)
|
||||
}
|
||||
|
||||
func (q *connectionsProvider) ListConnections(ctx context.Context, namespace string) (*queryV0.DataSourceConnectionList, error) {
|
||||
ns, err := authlib.ParseNamespace(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dss, err := q.dsService.GetDataSources(ctx, &datasources.GetDataSourcesQuery{
|
||||
OrgID: ns.OrgID,
|
||||
DataSourceLimit: 10000,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := &queryV0.DataSourceConnectionList{
|
||||
Items: []queryV0.DataSourceConnection{},
|
||||
}
|
||||
for _, ds := range dss {
|
||||
v, err := q.asConnection(ds, namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result.Items = append(result.Items, *v)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (q *connectionsProvider) asConnection(ds *datasources.DataSource, ns string) (v *queryV0.DataSourceConnection, err error) {
|
||||
gv, err := q.registry.GetDatasourceGroupVersion(ds.Type)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("datasource type %q does not map to an apiserver %w", ds.Type, err)
|
||||
}
|
||||
|
||||
v = &queryV0.DataSourceConnection{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: queryV0.DataSourceConnectionName(gv.Group, ds.UID),
|
||||
Namespace: ns,
|
||||
CreationTimestamp: metav1.NewTime(ds.Created),
|
||||
ResourceVersion: fmt.Sprintf("%d", ds.Updated.UnixMilli()),
|
||||
Generation: int64(ds.Version),
|
||||
},
|
||||
Title: ds.Name,
|
||||
Datasource: queryV0.DataSourceConnectionRef{
|
||||
Group: gv.Group,
|
||||
Version: gv.Version,
|
||||
Name: ds.UID,
|
||||
},
|
||||
}
|
||||
v.UID = gapiutil.CalculateClusterWideUID(v) // UID is unique across all groups
|
||||
if !ds.Updated.IsZero() {
|
||||
meta, err := utils.MetaAccessor(v)
|
||||
if err != nil {
|
||||
meta.SetUpdatedTimestamp(&ds.Updated)
|
||||
}
|
||||
}
|
||||
return v, err
|
||||
}
|
|
@ -66,21 +66,8 @@ func AddQueriesToOpenAPI(options OASQueryOptions) error {
|
|||
// Rewrite the query path
|
||||
query := oas.Paths.Paths[root+options.QueryPath]
|
||||
if query != nil && query.Post != nil {
|
||||
query.Post.Tags = []string{"Query"}
|
||||
query.Parameters = []*spec3.Parameter{
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "namespace",
|
||||
In: "path",
|
||||
Description: "object name and auth scope, such as for teams and projects",
|
||||
Example: "default",
|
||||
Required: true,
|
||||
Schema: spec.StringProperty().UniqueValues(),
|
||||
},
|
||||
},
|
||||
}
|
||||
query.Post.Tags = []string{"DataSource"}
|
||||
query.Post.Description = options.QueryDescription
|
||||
query.Post.Parameters = nil //
|
||||
query.Post.RequestBody = &spec3.RequestBody{
|
||||
RequestBodyProps: spec3.RequestBodyProps{
|
||||
Content: map[string]*spec3.MediaType{
|
||||
|
|
|
@ -50,6 +50,7 @@ type QueryAPIBuilder struct {
|
|||
converter *expr.ResultConverter
|
||||
queryTypes *query.QueryTypeDefinitionList
|
||||
legacyDatasourceLookup service.LegacyDataSourceLookup
|
||||
connections DataSourceConnectionProvider
|
||||
}
|
||||
|
||||
func NewQueryAPIBuilder(
|
||||
|
@ -60,6 +61,7 @@ func NewQueryAPIBuilder(
|
|||
registerer prometheus.Registerer,
|
||||
tracer tracing.Tracer,
|
||||
legacyDatasourceLookup service.LegacyDataSourceLookup,
|
||||
connections DataSourceConnectionProvider,
|
||||
) (*QueryAPIBuilder, error) {
|
||||
// Include well typed query definitions
|
||||
var queryTypes *query.QueryTypeDefinitionList
|
||||
|
@ -86,6 +88,7 @@ func NewQueryAPIBuilder(
|
|||
tracer: tracer,
|
||||
features: features,
|
||||
queryTypes: queryTypes,
|
||||
connections: connections,
|
||||
converter: &expr.ResultConverter{
|
||||
Features: features,
|
||||
Tracer: tracer,
|
||||
|
@ -127,6 +130,8 @@ func RegisterAPIService(
|
|||
return authorizer.DecisionAllow, "", nil
|
||||
})
|
||||
|
||||
reg := client.NewDataSourceRegistryFromStore(pluginStore, dataSourcesService)
|
||||
|
||||
builder, err := NewQueryAPIBuilder(
|
||||
features,
|
||||
client.NewSingleTenantInstanceProvider(cfg, features, pluginClient, pCtxProvider, accessControl),
|
||||
|
@ -135,6 +140,7 @@ func RegisterAPIService(
|
|||
registerer,
|
||||
tracer,
|
||||
legacyDatasourceLookup,
|
||||
&connectionsProvider{dsService: dataSourcesService, registry: reg},
|
||||
)
|
||||
apiregistration.RegisterAPI(builder)
|
||||
return builder, err
|
||||
|
@ -148,6 +154,8 @@ func addKnownTypes(scheme *runtime.Scheme, gv schema.GroupVersion) {
|
|||
scheme.AddKnownTypes(gv,
|
||||
&query.DataSourceApiServer{},
|
||||
&query.DataSourceApiServerList{},
|
||||
&query.DataSourceConnection{},
|
||||
&query.DataSourceConnectionList{},
|
||||
&query.QueryDataRequest{},
|
||||
&query.QueryDataResponse{},
|
||||
&query.QueryTypeDefinition{},
|
||||
|
@ -170,6 +178,14 @@ func (b *QueryAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIG
|
|||
|
||||
storage := map[string]rest.Storage{}
|
||||
|
||||
// Get a list of all datasource instances
|
||||
if b.features.IsEnabledGlobally(featuremgmt.FlagQueryServiceWithConnections) {
|
||||
// Eventually this would be backed either by search or reconciler pattern
|
||||
storage[query.ConnectionResourceInfo.StoragePath()] = &connectionAccess{
|
||||
connections: b.connections,
|
||||
}
|
||||
}
|
||||
|
||||
plugins := newPluginsStorage(b.registry)
|
||||
storage[plugins.resourceInfo.StoragePath()] = plugins
|
||||
if !b.features.IsEnabledGlobally(featuremgmt.FlagGrafanaAPIServerWithExperimentalAPIs) {
|
||||
|
|
|
@ -4,6 +4,8 @@ import (
|
|||
"fmt"
|
||||
"maps"
|
||||
|
||||
alertingNotify "github.com/grafana/alerting/notify"
|
||||
"github.com/grafana/alerting/receivers/schema"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
@ -64,11 +66,11 @@ func convertToK8sResource(
|
|||
for _, integration := range receiver.Integrations {
|
||||
spec.Integrations = append(spec.Integrations, model.ReceiverIntegration{
|
||||
Uid: &integration.UID,
|
||||
Type: integration.Config.Type,
|
||||
Type: string(integration.Config.Type()),
|
||||
Version: string(integration.Config.Version),
|
||||
DisableResolveMessage: &integration.DisableResolveMessage,
|
||||
Settings: maps.Clone(integration.Settings),
|
||||
SecureFields: integration.SecureFields(),
|
||||
Version: integration.Config.Version,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -125,14 +127,21 @@ func convertToDomainModel(receiver *model.Receiver) (*ngmodels.Receiver, map[str
|
|||
}
|
||||
storedSecureFields := make(map[string][]string, len(receiver.Spec.Integrations))
|
||||
for _, integration := range receiver.Spec.Integrations {
|
||||
version := &integration.Version
|
||||
if *version == "" {
|
||||
version = nil
|
||||
}
|
||||
config, err := ngmodels.IntegrationConfigFromType(integration.Type, version)
|
||||
t, err := alertingNotify.IntegrationTypeFromString(integration.Type)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
var config schema.IntegrationSchemaVersion
|
||||
typeSchema, _ := alertingNotify.GetSchemaForIntegration(t)
|
||||
if integration.Version != "" {
|
||||
var ok bool
|
||||
config, ok = typeSchema.GetVersion(schema.Version(integration.Version))
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("invalid version %s for integration type %s", integration.Version, integration.Type)
|
||||
}
|
||||
} else {
|
||||
config = typeSchema.GetCurrentVersion()
|
||||
}
|
||||
grafanaIntegration := ngmodels.Integration{
|
||||
Name: receiver.Spec.Title,
|
||||
Config: config,
|
||||
|
|
|
@ -741,7 +741,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scopedPluginDatasourceProvider := datasource.ProvideDefaultPluginConfigs(service15, cacheServiceImpl, plugincontextProvider)
|
||||
scopedPluginDatasourceProvider := datasource.ProvideDefaultPluginConfigs(service15, cacheServiceImpl, plugincontextProvider, cfg)
|
||||
v := builder.ProvideDefaultBuildHandlerChainFuncFromBuilders()
|
||||
aggregatorRunner := aggregatorrunner.ProvideNoopAggregatorConfigurator()
|
||||
playlistAppInstaller, err := playlist.RegisterAppInstaller(playlistService, cfg, featureToggles)
|
||||
|
@ -1345,7 +1345,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scopedPluginDatasourceProvider := datasource.ProvideDefaultPluginConfigs(service15, cacheServiceImpl, plugincontextProvider)
|
||||
scopedPluginDatasourceProvider := datasource.ProvideDefaultPluginConfigs(service15, cacheServiceImpl, plugincontextProvider, cfg)
|
||||
v := builder.ProvideDefaultBuildHandlerChainFuncFromBuilders()
|
||||
aggregatorRunner := aggregatorrunner.ProvideNoopAggregatorConfigurator()
|
||||
playlistAppInstaller, err := playlist.RegisterAppInstaller(playlistService, cfg, featureToggles)
|
||||
|
|
|
@ -535,6 +535,13 @@ var (
|
|||
Owner: grafanaDatasourcesCoreServicesSquad,
|
||||
RequiresRestart: true, // Adds a route at startup
|
||||
},
|
||||
{
|
||||
Name: "queryServiceWithConnections",
|
||||
Description: "Adds datasource connections to the query service",
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: grafanaDatasourcesCoreServicesSquad,
|
||||
RequiresRestart: true, // Adds a route at startup
|
||||
},
|
||||
{
|
||||
Name: "queryServiceRewrite",
|
||||
Description: "Rewrite requests targeting /ds/query to the query service",
|
||||
|
|
|
@ -70,6 +70,7 @@ dashboardSchemaValidationLogging,experimental,@grafana/grafana-app-platform-squa
|
|||
scanRowInvalidDashboardParseFallbackEnabled,experimental,@grafana/search-and-storage,false,false,false
|
||||
datasourceQueryTypes,experimental,@grafana/grafana-app-platform-squad,false,true,false
|
||||
queryService,experimental,@grafana/grafana-datasources-core-services,false,true,false
|
||||
queryServiceWithConnections,experimental,@grafana/grafana-datasources-core-services,false,true,false
|
||||
queryServiceRewrite,experimental,@grafana/grafana-datasources-core-services,false,true,false
|
||||
queryServiceFromUI,experimental,@grafana/grafana-datasources-core-services,false,false,true
|
||||
queryServiceFromExplore,experimental,@grafana/grafana-datasources-core-services,false,false,true
|
||||
|
|
|
|
@ -291,6 +291,10 @@ const (
|
|||
// Register /apis/query.grafana.app/ -- will eventually replace /api/ds/query
|
||||
FlagQueryService = "queryService"
|
||||
|
||||
// FlagQueryServiceWithConnections
|
||||
// Adds datasource connections to the query service
|
||||
FlagQueryServiceWithConnections = "queryServiceWithConnections"
|
||||
|
||||
// FlagQueryServiceRewrite
|
||||
// Rewrite requests targeting /ds/query to the query service
|
||||
FlagQueryServiceRewrite = "queryServiceRewrite"
|
||||
|
|
|
@ -3150,6 +3150,19 @@
|
|||
"requiresRestart": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "queryServiceWithConnections",
|
||||
"resourceVersion": "1756367172351",
|
||||
"creationTimestamp": "2025-08-28T07:46:12Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Adds datasource connections to the query service",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/grafana-datasources-core-services",
|
||||
"requiresRestart": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "recordedQueriesMulti",
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
amConfig "github.com/prometheus/alertmanager/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
|
||||
|
@ -549,56 +548,3 @@ func ApiRecordFromModelRecord(r *models.Record) *definitions.Record {
|
|||
TargetDatasourceUID: r.TargetDatasourceUID,
|
||||
}
|
||||
}
|
||||
|
||||
func GettableGrafanaReceiverFromReceiver(r *models.Integration, provenance models.Provenance) (definitions.GettableGrafanaReceiver, error) {
|
||||
out := definitions.GettableGrafanaReceiver{
|
||||
UID: r.UID,
|
||||
Name: r.Name,
|
||||
Type: r.Config.Type,
|
||||
Provenance: definitions.Provenance(provenance),
|
||||
DisableResolveMessage: r.DisableResolveMessage,
|
||||
SecureFields: r.SecureFields(),
|
||||
}
|
||||
|
||||
if len(r.Settings) > 0 {
|
||||
jsonBytes, err := json.Marshal(r.Settings)
|
||||
if err != nil {
|
||||
return definitions.GettableGrafanaReceiver{}, err
|
||||
}
|
||||
out.Settings = jsonBytes
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func GettableApiReceiverFromReceiver(r *models.Receiver) (*definitions.GettableApiReceiver, error) {
|
||||
out := definitions.GettableApiReceiver{
|
||||
Receiver: amConfig.Receiver{
|
||||
Name: r.Name,
|
||||
},
|
||||
GettableGrafanaReceivers: definitions.GettableGrafanaReceivers{
|
||||
GrafanaManagedReceivers: make([]*definitions.GettableGrafanaReceiver, 0, len(r.Integrations)),
|
||||
},
|
||||
}
|
||||
|
||||
for _, integration := range r.Integrations {
|
||||
gettable, err := GettableGrafanaReceiverFromReceiver(integration, r.Provenance)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out.GrafanaManagedReceivers = append(out.GrafanaManagedReceivers, &gettable)
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func GettableApiReceiversFromReceivers(recvs []*models.Receiver) ([]*definitions.GettableApiReceiver, error) {
|
||||
out := make([]*definitions.GettableApiReceiver, 0, len(recvs))
|
||||
for _, r := range recvs {
|
||||
gettables, err := GettableApiReceiverFromReceiver(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, gettables)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
"math"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/grafana/alerting/models"
|
||||
alertingNotify "github.com/grafana/alerting/notify"
|
||||
|
@ -142,7 +141,7 @@ func (r *Receiver) Validate(decryptFn DecryptFn) error {
|
|||
func (r *Receiver) GetIntegrationTypes() []string {
|
||||
result := make([]string, 0, len(r.Integrations))
|
||||
for _, i := range r.Integrations {
|
||||
result = append(result, i.Config.Type)
|
||||
result = append(result, string(i.Config.Type()))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
@ -151,7 +150,7 @@ func (r *Receiver) GetIntegrationTypes() []string {
|
|||
type Integration struct {
|
||||
UID string
|
||||
Name string
|
||||
Config IntegrationConfig
|
||||
Config schema.IntegrationSchemaVersion
|
||||
DisableResolveMessage bool
|
||||
// Settings can contain both secure and non-secure settings either unencrypted or redacted.
|
||||
Settings map[string]any
|
||||
|
@ -167,189 +166,11 @@ func (integration *Integration) ResourceID() string {
|
|||
return integration.UID
|
||||
}
|
||||
|
||||
// IntegrationConfig represents the configuration of an integration. It contains the type and information about the fields.
|
||||
type IntegrationConfig struct {
|
||||
Type string
|
||||
Version string
|
||||
Fields map[string]IntegrationField
|
||||
}
|
||||
|
||||
// IntegrationField represents a field in an integration configuration.
|
||||
type IntegrationField struct {
|
||||
Name string
|
||||
Fields map[string]IntegrationField
|
||||
Secure bool
|
||||
}
|
||||
|
||||
type IntegrationFieldPath []string
|
||||
|
||||
func NewIntegrationFieldPath(path string) IntegrationFieldPath {
|
||||
return strings.Split(path, ".")
|
||||
}
|
||||
|
||||
func (f IntegrationFieldPath) Head() string {
|
||||
if len(f) > 0 {
|
||||
return f[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (f IntegrationFieldPath) Tail() IntegrationFieldPath {
|
||||
return f[1:]
|
||||
}
|
||||
|
||||
func (f IntegrationFieldPath) IsLeaf() bool {
|
||||
return len(f) == 1
|
||||
}
|
||||
|
||||
func (f IntegrationFieldPath) String() string {
|
||||
return strings.Join(f, ".")
|
||||
}
|
||||
|
||||
func (f IntegrationFieldPath) With(segment string) IntegrationFieldPath {
|
||||
// Copy the existing path to avoid modifying the original slice.
|
||||
newPath := make(IntegrationFieldPath, len(f)+1)
|
||||
copy(newPath, f)
|
||||
newPath[len(newPath)-1] = segment
|
||||
return newPath
|
||||
}
|
||||
|
||||
// IntegrationConfigFromType returns an integration configuration for a given integration type of a given version.
|
||||
// If version is nil, the current version of the integration is used.
|
||||
// Returns an error if the integration type is not found or if the specified version does not exist.
|
||||
//
|
||||
// Parameters:
|
||||
//
|
||||
// integrationType - The type of integration to get configuration for
|
||||
// version - Optional specific version to get configuration for, uses latest if nil
|
||||
//
|
||||
// Returns:
|
||||
//
|
||||
// IntegrationConfig - The integration configuration
|
||||
// error - Error if integration type not found or invalid version specified
|
||||
func IntegrationConfigFromType(integrationType string, version *string) (IntegrationConfig, error) {
|
||||
typeSchema, ok := alertingNotify.GetSchemaForIntegration(schema.IntegrationType(integrationType))
|
||||
if !ok {
|
||||
return IntegrationConfig{}, fmt.Errorf("integration type %s not found", integrationType)
|
||||
}
|
||||
if version == nil {
|
||||
return IntegrationConfigFromSchema(typeSchema, typeSchema.CurrentVersion)
|
||||
}
|
||||
return IntegrationConfigFromSchema(typeSchema, schema.Version(*version))
|
||||
}
|
||||
|
||||
// IntegrationConfigFromSchema returns an integration configuration for a given version of the integration type schema.
|
||||
// Returns an error if the schema does not have such version
|
||||
func IntegrationConfigFromSchema(typeSchema schema.IntegrationTypeSchema, version schema.Version) (IntegrationConfig, error) {
|
||||
typeVersion, ok := typeSchema.GetVersion(version)
|
||||
if !ok {
|
||||
return IntegrationConfig{}, fmt.Errorf("version %s not found in config", version)
|
||||
}
|
||||
integrationConfig := IntegrationConfig{
|
||||
Type: string(typeSchema.Type),
|
||||
Version: string(typeVersion.Version),
|
||||
Fields: make(map[string]IntegrationField, len(typeVersion.Options)),
|
||||
}
|
||||
for _, option := range typeVersion.Options {
|
||||
integrationConfig.Fields[option.PropertyName] = notifierOptionToIntegrationField(option)
|
||||
}
|
||||
return integrationConfig, nil
|
||||
}
|
||||
|
||||
func notifierOptionToIntegrationField(option schema.Field) IntegrationField {
|
||||
f := IntegrationField{
|
||||
Name: option.PropertyName,
|
||||
Secure: option.Secure,
|
||||
Fields: make(map[string]IntegrationField, len(option.SubformOptions)),
|
||||
}
|
||||
for _, subformOption := range option.SubformOptions {
|
||||
f.Fields[subformOption.PropertyName] = notifierOptionToIntegrationField(subformOption)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// IsSecureField returns true if the field is both known and marked as secure in the integration configuration.
|
||||
func (config *IntegrationConfig) IsSecureField(path IntegrationFieldPath) bool {
|
||||
f, ok := config.GetField(path)
|
||||
return ok && f.Secure
|
||||
}
|
||||
|
||||
func (config *IntegrationConfig) GetField(path IntegrationFieldPath) (IntegrationField, bool) {
|
||||
for _, integrationField := range config.Fields {
|
||||
if strings.EqualFold(integrationField.Name, path.Head()) {
|
||||
if path.IsLeaf() {
|
||||
return integrationField, true
|
||||
}
|
||||
return integrationField.GetField(path.Tail())
|
||||
}
|
||||
}
|
||||
return IntegrationField{}, false
|
||||
}
|
||||
|
||||
func (config *IntegrationConfig) GetSecretFields() []IntegrationFieldPath {
|
||||
return traverseFields(config.Fields, nil, func(i IntegrationField) bool {
|
||||
return i.Secure
|
||||
})
|
||||
}
|
||||
|
||||
func traverseFields(flds map[string]IntegrationField, parentPath IntegrationFieldPath, predicate func(i IntegrationField) bool) []IntegrationFieldPath {
|
||||
var result []IntegrationFieldPath
|
||||
for key, field := range flds {
|
||||
path := parentPath.With(key)
|
||||
if predicate(field) {
|
||||
result = append(result, path)
|
||||
}
|
||||
if len(field.Fields) > 0 {
|
||||
result = append(result, traverseFields(field.Fields, path, predicate)...)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (config *IntegrationConfig) Clone() IntegrationConfig {
|
||||
clone := IntegrationConfig{
|
||||
Type: config.Type,
|
||||
Version: config.Version,
|
||||
}
|
||||
|
||||
if len(config.Fields) > 0 {
|
||||
clone.Fields = make(map[string]IntegrationField, len(config.Fields))
|
||||
for key, field := range config.Fields {
|
||||
clone.Fields[key] = field.Clone()
|
||||
}
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
||||
func (field *IntegrationField) GetField(path IntegrationFieldPath) (IntegrationField, bool) {
|
||||
for _, integrationField := range field.Fields {
|
||||
if strings.EqualFold(integrationField.Name, path.Head()) {
|
||||
if path.IsLeaf() {
|
||||
return integrationField, true
|
||||
}
|
||||
return integrationField.GetField(path.Tail())
|
||||
}
|
||||
}
|
||||
return IntegrationField{}, false
|
||||
}
|
||||
|
||||
func (field *IntegrationField) Clone() IntegrationField {
|
||||
f := IntegrationField{
|
||||
Name: field.Name,
|
||||
Secure: field.Secure,
|
||||
Fields: make(map[string]IntegrationField, len(field.Fields)),
|
||||
}
|
||||
for subName, sub := range field.Fields {
|
||||
f.Fields[subName] = sub.Clone()
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (integration *Integration) Clone() Integration {
|
||||
return Integration{
|
||||
UID: integration.UID,
|
||||
Name: integration.Name,
|
||||
Config: integration.Config.Clone(),
|
||||
Config: integration.Config,
|
||||
DisableResolveMessage: integration.DisableResolveMessage,
|
||||
Settings: cloneIntegrationSettings(integration.Settings),
|
||||
SecureSettings: maps.Clone(integration.SecureSettings),
|
||||
|
@ -394,7 +215,7 @@ func cloneIntegrationSettingsSlice(src []any) []any {
|
|||
// are stored in SecureSettings and the original values are removed from Settings.
|
||||
// If a field is already in SecureSettings it is not encrypted again.
|
||||
func (integration *Integration) Encrypt(encryptFn EncryptFn) error {
|
||||
secretFieldPaths := integration.Config.GetSecretFields()
|
||||
secretFieldPaths := integration.Config.GetSecretFieldsPaths()
|
||||
if len(secretFieldPaths) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -419,7 +240,7 @@ func (integration *Integration) Encrypt(encryptFn EncryptFn) error {
|
|||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func extractField(settings map[string]any, path IntegrationFieldPath) (string, bool, error) {
|
||||
func extractField(settings map[string]any, path schema.IntegrationFieldPath) (string, bool, error) {
|
||||
val, ok := settings[path.Head()]
|
||||
if !ok {
|
||||
return "", false, nil
|
||||
|
@ -439,7 +260,7 @@ func extractField(settings map[string]any, path IntegrationFieldPath) (string, b
|
|||
return extractField(sub, path.Tail())
|
||||
}
|
||||
|
||||
func getFieldValue(settings map[string]any, path IntegrationFieldPath) (any, bool) {
|
||||
func getFieldValue(settings map[string]any, path schema.IntegrationFieldPath) (any, bool) {
|
||||
val, ok := settings[path.Head()]
|
||||
if !ok {
|
||||
return nil, false
|
||||
|
@ -454,7 +275,7 @@ func getFieldValue(settings map[string]any, path IntegrationFieldPath) (any, boo
|
|||
return getFieldValue(sub, path.Tail())
|
||||
}
|
||||
|
||||
func setField(settings map[string]any, path IntegrationFieldPath, valueFn func(current any) any, skipIfNotExist bool) error {
|
||||
func setField(settings map[string]any, path schema.IntegrationFieldPath, valueFn func(current any) any, skipIfNotExist bool) error {
|
||||
if path.IsLeaf() {
|
||||
current, ok := settings[path.Head()]
|
||||
if skipIfNotExist && !ok {
|
||||
|
@ -489,7 +310,7 @@ func (integration *Integration) Decrypt(decryptFn DecryptFn) error {
|
|||
}
|
||||
delete(integration.SecureSettings, key)
|
||||
|
||||
path := NewIntegrationFieldPath(key)
|
||||
path := schema.ParseIntegrationPath(key)
|
||||
err = setField(integration.Settings, path, func(current any) any {
|
||||
return decrypted
|
||||
}, false)
|
||||
|
@ -503,7 +324,7 @@ func (integration *Integration) Decrypt(decryptFn DecryptFn) error {
|
|||
// Redact redacts all fields in SecureSettings and moves them to Settings.
|
||||
// The original values are removed from SecureSettings.
|
||||
func (integration *Integration) Redact(redactFn RedactFn) {
|
||||
for _, path := range integration.Config.GetSecretFields() {
|
||||
for _, path := range integration.Config.GetSecretFieldsPaths() {
|
||||
_ = setField(integration.Settings, path, func(current any) any {
|
||||
if s, ok := current.(string); ok && s != "" {
|
||||
return redactFn(s)
|
||||
|
@ -513,7 +334,7 @@ func (integration *Integration) Redact(redactFn RedactFn) {
|
|||
}
|
||||
|
||||
for key, secureVal := range integration.SecureSettings { // TODO: Should we trust that the receiver is stored correctly or use known secure settings?
|
||||
_ = setField(integration.Settings, NewIntegrationFieldPath(key), func(any) any {
|
||||
_ = setField(integration.Settings, schema.ParseIntegrationPath(key), func(any) any {
|
||||
return redactFn(secureVal)
|
||||
}, false)
|
||||
delete(integration.SecureSettings, key)
|
||||
|
@ -546,7 +367,7 @@ func (integration *Integration) SecureFields() map[string]bool {
|
|||
}
|
||||
}
|
||||
// We mark secure fields in the settings as well. This is to ensure legacy behaviour for redacted secure settings.
|
||||
for _, path := range integration.Config.GetSecretFields() {
|
||||
for _, path := range integration.Config.GetSecretFieldsPaths() {
|
||||
if secureFields[path.String()] {
|
||||
continue
|
||||
}
|
||||
|
@ -576,7 +397,7 @@ func (integration *Integration) Validate(decryptFn DecryptFn) error {
|
|||
return ValidateIntegration(context.Background(), models.IntegrationConfig{
|
||||
UID: decrypted.UID,
|
||||
Name: decrypted.Name,
|
||||
Type: decrypted.Config.Type,
|
||||
Type: string(decrypted.Config.Type()),
|
||||
DisableResolveMessage: decrypted.DisableResolveMessage,
|
||||
Settings: jsonBytes,
|
||||
SecureSettings: decrypted.SecureSettings,
|
||||
|
@ -627,7 +448,7 @@ func (r *Receiver) Fingerprint() string {
|
|||
sum.writeString(in.Name)
|
||||
|
||||
// Do not include fields in fingerprint as these are not part of the receiver definition.
|
||||
sum.writeString(in.Config.Type)
|
||||
sum.writeString(string(in.Config.Type()))
|
||||
|
||||
sum.writeBool(in.DisableResolveMessage)
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package models
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
|
@ -20,7 +19,7 @@ func TestReceiver_Clone(t *testing.T) {
|
|||
receiver Receiver
|
||||
}{
|
||||
{name: "empty receiver", receiver: Receiver{}},
|
||||
{name: "empty integration", receiver: Receiver{Integrations: []*Integration{{Config: IntegrationConfig{}}}}},
|
||||
{name: "empty integration", receiver: Receiver{Integrations: []*Integration{{Config: schema.IntegrationSchemaVersion{}}}}},
|
||||
{name: "random receiver", receiver: ReceiverGen()()},
|
||||
}
|
||||
|
||||
|
@ -48,12 +47,12 @@ func TestReceiver_EncryptDecrypt(t *testing.T) {
|
|||
typeVersion, ok := alertingNotify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
require.True(t, ok)
|
||||
for _, key := range typeVersion.GetSecretFieldsPaths() {
|
||||
val, ok, err := extractField(encrypted.Settings, NewIntegrationFieldPath(key))
|
||||
val, ok, err := extractField(encrypted.Settings, key)
|
||||
assert.NoError(t, err)
|
||||
if ok {
|
||||
encryptedVal, err := encryptFn(val)
|
||||
assert.NoError(t, err)
|
||||
encrypted.SecureSettings[key] = encryptedVal
|
||||
encrypted.SecureSettings[key.String()] = encryptedVal
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -84,9 +83,9 @@ func TestIntegration_Redact(t *testing.T) {
|
|||
version, ok := alertingNotify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
require.True(t, ok)
|
||||
for _, key := range version.GetSecretFieldsPaths() {
|
||||
err := setField(expected.Settings, NewIntegrationFieldPath(key), func(current any) any {
|
||||
err := setField(expected.Settings, key, func(current any) any {
|
||||
if s, isString := current.(string); isString && s != "" {
|
||||
delete(expected.SecureSettings, key)
|
||||
delete(expected.SecureSettings, key.String())
|
||||
return redactFn(s)
|
||||
}
|
||||
return current
|
||||
|
@ -242,53 +241,37 @@ func TestSecretsIntegrationConfig(t *testing.T) {
|
|||
// Test that all known integration types have a config and correctly mark their secrets as secure.
|
||||
for integrationType := range notifytest.AllKnownV1ConfigsForTesting {
|
||||
t.Run(string(integrationType), func(t *testing.T) {
|
||||
schemaType, ok := alertingNotify.GetSchemaForIntegration(integrationType)
|
||||
config, ok := alertingNotify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
require.True(t, ok)
|
||||
|
||||
config, err := IntegrationConfigFromSchema(schemaType, schema.V1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
version, ok := schemaType.GetVersion(schema.V1)
|
||||
require.True(t, ok)
|
||||
|
||||
secrets := version.GetSecretFieldsPaths()
|
||||
secrets := config.GetSecretFieldsPaths()
|
||||
allSecrets := make(map[string]struct{}, len(secrets))
|
||||
for _, key := range secrets {
|
||||
allSecrets[key] = struct{}{}
|
||||
allSecrets[key.String()] = struct{}{}
|
||||
}
|
||||
|
||||
secretFields := config.GetSecretFields()
|
||||
secretFields := config.GetSecretFieldsPaths()
|
||||
for _, path := range secretFields {
|
||||
_, isSecret := allSecrets[path.String()]
|
||||
assert.Equalf(t, isSecret, config.IsSecureField(path), "field '%s' is expected to be secret", path)
|
||||
delete(allSecrets, path.String())
|
||||
}
|
||||
assert.False(t, config.IsSecureField(IntegrationFieldPath{"__--**unknown_field**--__"}))
|
||||
assert.False(t, config.IsSecureField(schema.ParseIntegrationPath("__--**unknown_field**--__")))
|
||||
assert.Empty(t, allSecrets, "mismatched secret fields for integration type %s: %v", integrationType, allSecrets)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("Unknown version returns error", func(t *testing.T) {
|
||||
for s := range maps.Keys(notifytest.AllKnownV1ConfigsForTesting) {
|
||||
schemaType, _ := alertingNotify.GetSchemaForIntegration(s)
|
||||
_, err := IntegrationConfigFromSchema(schemaType, "unknown")
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration_SecureFields(t *testing.T) {
|
||||
testutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
// Test that all known integration types have a config and correctly mark their secrets as secure.
|
||||
for it := range notifytest.AllKnownV1ConfigsForTesting {
|
||||
integrationType := it
|
||||
for integrationType := range notifytest.AllKnownV1ConfigsForTesting {
|
||||
t.Run(string(integrationType), func(t *testing.T) {
|
||||
t.Run("contains SecureSettings", func(t *testing.T) {
|
||||
validIntegration := IntegrationGen(IntegrationMuts.WithValidConfig(integrationType))()
|
||||
expected := make(map[string]bool, len(validIntegration.SecureSettings))
|
||||
for _, path := range validIntegration.Config.GetSecretFields() {
|
||||
for _, path := range validIntegration.Config.GetSecretFieldsPaths() {
|
||||
if validIntegration.Config.IsSecureField(path) {
|
||||
expected[path.String()] = true
|
||||
validIntegration.SecureSettings[path.String()] = "test"
|
||||
|
@ -303,7 +286,7 @@ func TestIntegration_SecureFields(t *testing.T) {
|
|||
t.Run("contains secret Settings not in SecureSettings", func(t *testing.T) {
|
||||
validIntegration := IntegrationGen(IntegrationMuts.WithValidConfig(integrationType))()
|
||||
expected := make(map[string]bool, len(validIntegration.SecureSettings))
|
||||
for _, path := range validIntegration.Config.GetSecretFields() {
|
||||
for _, path := range validIntegration.Config.GetSecretFieldsPaths() {
|
||||
if validIntegration.Config.IsSecureField(path) {
|
||||
expected[path.String()] = true
|
||||
assert.NoError(t, setField(validIntegration.Settings, path, func(current any) any {
|
||||
|
@ -341,8 +324,7 @@ func TestReceiver_Fingerprint(t *testing.T) {
|
|||
"setting": "value",
|
||||
"something": 123,
|
||||
"data": []string{"test"},
|
||||
} // Add a broken type to ensure it is stable in the fingerprint.
|
||||
baseReceiver.Integrations[0].Config = IntegrationConfig{Type: baseReceiver.Integrations[0].Config.Type} // Remove all fields except Type.
|
||||
}
|
||||
|
||||
completelyDifferentReceiver := ReceiverGen(ReceiverMuts.WithName("test receiver2"), ReceiverMuts.WithIntegrations(
|
||||
IntegrationGen(im.WithName("test receiver2"), im.WithValidConfig("discord"))(),
|
||||
|
@ -351,7 +333,6 @@ func TestReceiver_Fingerprint(t *testing.T) {
|
|||
completelyDifferentReceiver.Integrations[0].DisableResolveMessage = false
|
||||
completelyDifferentReceiver.Integrations[0].SecureSettings = map[string]string{"test": "test"}
|
||||
completelyDifferentReceiver.Provenance = ProvenanceAPI
|
||||
completelyDifferentReceiver.Integrations[0].Config = IntegrationConfig{Type: completelyDifferentReceiver.Integrations[0].Config.Type} // Remove all fields except Type.
|
||||
|
||||
t.Run("stable across code changes", func(t *testing.T) {
|
||||
expectedFingerprint := "c0c82936be34b183" // If this is a valid fingerprint generation change, update the expected value.
|
||||
|
|
|
@ -1323,9 +1323,7 @@ func (n IntegrationMutators) WithValidConfig(integrationType schema.IntegrationT
|
|||
panic(fmt.Sprintf("unknown integration type: %s", integrationType))
|
||||
}
|
||||
config := ncfg.GetRawNotifierConfig(c.Name)
|
||||
typeSchema, _ := alertingNotify.GetSchemaForIntegration(integrationType)
|
||||
integrationConfig, _ := IntegrationConfigFromSchema(typeSchema, schema.V1)
|
||||
c.Config = integrationConfig
|
||||
c.Config, _ = alertingNotify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
|
||||
var settings map[string]any
|
||||
_ = json.Unmarshal(config.Settings, &settings)
|
||||
|
@ -1342,11 +1340,11 @@ func (n IntegrationMutators) WithValidConfig(integrationType schema.IntegrationT
|
|||
|
||||
func (n IntegrationMutators) WithInvalidConfig(integrationType schema.IntegrationType) Mutator[Integration] {
|
||||
return func(c *Integration) {
|
||||
typeSchema, ok := alertingNotify.GetSchemaForIntegration(integrationType)
|
||||
var ok bool
|
||||
c.Config, ok = alertingNotify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("unknown integration type: %s", integrationType))
|
||||
}
|
||||
c.Config, _ = IntegrationConfigFromSchema(typeSchema, schema.V1)
|
||||
c.Settings = map[string]interface{}{}
|
||||
c.SecureSettings = map[string]string{}
|
||||
if integrationType == webex.Type {
|
||||
|
|
|
@ -113,7 +113,7 @@ func encryptReceiverConfigs(c []*definitions.PostableApiReceiver, encrypt defini
|
|||
if !ok {
|
||||
return fmt.Errorf("failed to get secret keys for contact point type %s", gr.Type)
|
||||
}
|
||||
secretKeys := typeSchema.GetSecretFieldsPaths()
|
||||
secretPaths := typeSchema.GetSecretFieldsPaths()
|
||||
secureSettings := gr.SecureSettings
|
||||
if secureSettings == nil {
|
||||
secureSettings = make(map[string]string)
|
||||
|
@ -121,7 +121,8 @@ func encryptReceiverConfigs(c []*definitions.PostableApiReceiver, encrypt defini
|
|||
|
||||
settingsChanged := false
|
||||
secureSettingsChanged := false
|
||||
for _, secretKey := range secretKeys {
|
||||
for _, secretPath := range secretPaths {
|
||||
secretKey := secretPath.String()
|
||||
settingsValue, ok := settings[secretKey]
|
||||
if !ok {
|
||||
continue
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"maps"
|
||||
|
||||
alertingNotify "github.com/grafana/alerting/notify"
|
||||
"github.com/grafana/alerting/receivers/schema"
|
||||
|
||||
apimodels "github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
|
||||
"github.com/grafana/grafana/pkg/services/ngalert/models"
|
||||
|
@ -26,7 +27,7 @@ func IntegrationToPostableGrafanaReceiver(integration *models.Integration) (*api
|
|||
postable := &apimodels.PostableGrafanaReceiver{
|
||||
UID: integration.UID,
|
||||
Name: integration.Name,
|
||||
Type: integration.Config.Type,
|
||||
Type: string(integration.Config.Type()),
|
||||
DisableResolveMessage: integration.DisableResolveMessage,
|
||||
SecureSettings: maps.Clone(integration.SecureSettings),
|
||||
}
|
||||
|
@ -117,10 +118,14 @@ func PostableGrafanaReceiversToIntegrations(postables []*apimodels.PostableGrafa
|
|||
}
|
||||
|
||||
func PostableGrafanaReceiverToIntegration(p *apimodels.PostableGrafanaReceiver) (*models.Integration, error) {
|
||||
config, err := models.IntegrationConfigFromType(p.Type, nil)
|
||||
integrationType, err := alertingNotify.IntegrationTypeFromString(p.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config, ok := alertingNotify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("integration type [%s] does not have schema of version %s", integrationType, schema.V1)
|
||||
}
|
||||
integration := &models.Integration{
|
||||
UID: p.UID,
|
||||
Name: p.Name,
|
||||
|
@ -132,7 +137,7 @@ func PostableGrafanaReceiverToIntegration(p *apimodels.PostableGrafanaReceiver)
|
|||
|
||||
if p.Settings != nil {
|
||||
if err := json.Unmarshal(p.Settings, &integration.Settings); err != nil {
|
||||
return nil, fmt.Errorf("integration '%s' of receiver '%s' has settings that cannot be parsed as JSON: %w", integration.Config.Type, p.Name, err)
|
||||
return nil, fmt.Errorf("integration '%s' of receiver '%s' has settings that cannot be parsed as JSON: %w", integration.Config.Type(), p.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -94,9 +94,7 @@ func TestDeleteReceiver(t *testing.T) {
|
|||
|
||||
func TestCreateReceiver(t *testing.T) {
|
||||
rawCfg := notifytest.AllKnownV1ConfigsForTesting[webhook.Type]
|
||||
typeSchema, _ := notify.GetSchemaForIntegration(webhook.Type)
|
||||
cfgSchema, err := models.IntegrationConfigFromSchema(typeSchema, schema.V1)
|
||||
require.NoError(t, err)
|
||||
cfgSchema, _ := notify.GetSchemaVersionForIntegration(webhook.Type, schema.V1)
|
||||
settings := map[string]any{}
|
||||
require.NoError(t, json.Unmarshal([]byte(rawCfg.Config), &settings))
|
||||
|
||||
|
@ -201,9 +199,7 @@ func TestCreateReceiver(t *testing.T) {
|
|||
|
||||
func TestUpdateReceiver(t *testing.T) {
|
||||
rawCfg := notifytest.AllKnownV1ConfigsForTesting[webhook.Type]
|
||||
typeSchema, _ := notify.GetSchemaForIntegration(webhook.Type)
|
||||
cfgSchema, err := models.IntegrationConfigFromSchema(typeSchema, schema.V1)
|
||||
require.NoError(t, err)
|
||||
cfgSchema, _ := notify.GetSchemaVersionForIntegration(webhook.Type, schema.V1)
|
||||
settings := map[string]any{}
|
||||
require.NoError(t, json.Unmarshal([]byte(rawCfg.Config), &settings))
|
||||
|
||||
|
@ -302,9 +298,7 @@ func TestUpdateReceiver(t *testing.T) {
|
|||
|
||||
func TestGetReceiver(t *testing.T) {
|
||||
rawCfg := notifytest.AllKnownV1ConfigsForTesting[webhook.Type]
|
||||
typeSchema, _ := notify.GetSchemaForIntegration(webhook.Type)
|
||||
cfgSchema, err := models.IntegrationConfigFromSchema(typeSchema, schema.V1)
|
||||
require.NoError(t, err)
|
||||
cfgSchema, _ := notify.GetSchemaVersionForIntegration(webhook.Type, schema.V1)
|
||||
settings := map[string]any{}
|
||||
require.NoError(t, json.Unmarshal([]byte(rawCfg.Config), &settings))
|
||||
|
||||
|
|
|
@ -465,7 +465,7 @@ func TestReceiverService_Create(t *testing.T) {
|
|||
{
|
||||
UID: lineIntegration.UID,
|
||||
Name: lineIntegration.Name,
|
||||
Type: lineIntegration.Config.Type,
|
||||
Type: string(lineIntegration.Config.Type()),
|
||||
DisableResolveMessage: lineIntegration.DisableResolveMessage,
|
||||
Settings: definitions.RawMessage(`{}`), // Empty settings, not nil.
|
||||
SecureSettings: map[string]string{
|
||||
|
|
|
@ -61,7 +61,7 @@ func GrafanaIntegrationConfigToEmbeddedContactPoint(r *models.Integration, prove
|
|||
return definitions.EmbeddedContactPoint{
|
||||
UID: r.UID,
|
||||
Name: r.Name,
|
||||
Type: r.Config.Type,
|
||||
Type: string(r.Config.Type()),
|
||||
DisableResolveMessage: r.DisableResolveMessage,
|
||||
Settings: settingJson,
|
||||
Provenance: string(provenance),
|
||||
|
|
|
@ -251,7 +251,8 @@ func (ecp *ContactPointService) UpdateContactPoint(ctx context.Context, orgID in
|
|||
if !ok {
|
||||
return fmt.Errorf("%w: failed to get secret keys for contact point type %s", ErrValidation, contactPoint.Type)
|
||||
}
|
||||
for _, secretKey := range typeSchema.GetSecretFieldsPaths() {
|
||||
for _, secretPath := range typeSchema.GetSecretFieldsPaths() {
|
||||
secretKey := secretPath.String()
|
||||
secretValue := contactPoint.Settings.Get(secretKey).MustString()
|
||||
if secretValue == apimodels.RedactedValue {
|
||||
contactPoint.Settings.Set(secretKey, rawContactPoint.Settings.Get(secretKey).MustString())
|
||||
|
@ -526,7 +527,8 @@ func RemoveSecretsForContactPoint(e *apimodels.EmbeddedContactPoint) (map[string
|
|||
if !ok {
|
||||
return nil, fmt.Errorf("failed to get secret keys for contact point type %s", e.Type)
|
||||
}
|
||||
for _, secretKey := range typeSchema.GetSecretFieldsPaths() {
|
||||
for _, secretPath := range typeSchema.GetSecretFieldsPaths() {
|
||||
secretKey := secretPath.String()
|
||||
secretValue, err := extractCaseInsensitive(e.Settings, secretKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -461,9 +461,9 @@ func TestRemoveSecretsForContactPoint(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
FIELDS_ASSERT:
|
||||
for _, field := range expectedFields {
|
||||
for _, path := range expectedFields {
|
||||
field := path.String()
|
||||
assert.Contains(t, secureFields, field)
|
||||
path := strings.Split(field, ".")
|
||||
var expectedValue any = integration.Settings
|
||||
for _, segment := range path {
|
||||
v, ok := expectedValue.(map[string]any)
|
||||
|
|
|
@ -1320,7 +1320,8 @@ func TestIntegrationCRUD(t *testing.T) {
|
|||
typeSchema, ok := notify.GetSchemaVersionForIntegration(integrationType, schema.V1)
|
||||
require.True(t, ok)
|
||||
secretFields := typeSchema.GetSecretFieldsPaths()
|
||||
for _, field := range secretFields {
|
||||
for _, fieldPath := range secretFields {
|
||||
field := fieldPath.String()
|
||||
if _, ok := fields[field]; !ok { // skip field that is not in the original setting
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
apiVersion: testdata.datasource.grafana.app/v0alpha1
|
||||
kind: DataSource
|
||||
metadata:
|
||||
name: sample-testdata
|
||||
spec:
|
||||
title: Sample datasource
|
||||
access: proxy
|
||||
isDefault: true
|
||||
jsonData:
|
||||
key: value
|
||||
hello: 10
|
||||
world: false
|
||||
secure:
|
||||
sampleA:
|
||||
create: secret value here # replaced with UID on write
|
||||
sampleB:
|
||||
name: XYZ # reference to a existing secret
|
|
@ -2,13 +2,16 @@ package dashboards
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/tests/apis"
|
||||
|
@ -38,97 +41,58 @@ func TestIntegrationTestDatasource(t *testing.T) {
|
|||
Type: datasources.DS_TESTDATA,
|
||||
UID: "test",
|
||||
OrgID: int64(1),
|
||||
|
||||
// These settings are not actually used, but testing that they get saved
|
||||
Database: "testdb",
|
||||
URL: "http://fake.url",
|
||||
Access: datasources.DS_ACCESS_PROXY,
|
||||
User: "example",
|
||||
ReadOnly: true,
|
||||
JsonData: simplejson.NewFromAny(map[string]any{
|
||||
"hello": "world",
|
||||
}),
|
||||
SecureJsonData: map[string]string{
|
||||
"aaa": "AAA",
|
||||
"bbb": "BBB",
|
||||
},
|
||||
})
|
||||
require.Equal(t, "test", ds.UID)
|
||||
|
||||
t.Run("Check discovery client", func(t *testing.T) {
|
||||
disco := helper.GetGroupVersionInfoJSON("testdata.datasource.grafana.app")
|
||||
fmt.Printf("%s", disco)
|
||||
t.Run("Admin configs", func(t *testing.T) {
|
||||
client := helper.Org1.Admin.ResourceClient(t, schema.GroupVersionResource{
|
||||
Group: "testdata.datasource.grafana.app",
|
||||
Version: "v0alpha1",
|
||||
Resource: "datasources",
|
||||
}).Namespace("default")
|
||||
ctx := context.Background()
|
||||
|
||||
require.JSONEq(t, `[
|
||||
{
|
||||
"freshness": "Current",
|
||||
"resources": [
|
||||
{
|
||||
"resource": "connections",
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "DataSourceConnection",
|
||||
"version": ""
|
||||
},
|
||||
"scope": "Namespaced",
|
||||
"shortNames": [
|
||||
"grafana-testdata-datasource-connection"
|
||||
],
|
||||
"singularResource": "connection",
|
||||
"subresources": [
|
||||
{
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "HealthCheckResult",
|
||||
"version": ""
|
||||
},
|
||||
"subresource": "health",
|
||||
"verbs": [
|
||||
"get"
|
||||
]
|
||||
},
|
||||
{
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "QueryDataResponse",
|
||||
"version": ""
|
||||
},
|
||||
"subresource": "query",
|
||||
"verbs": [
|
||||
"create"
|
||||
]
|
||||
},
|
||||
{
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "Status",
|
||||
"version": ""
|
||||
},
|
||||
"subresource": "resource",
|
||||
"verbs": [
|
||||
"create",
|
||||
"delete",
|
||||
"get",
|
||||
"patch",
|
||||
"update"
|
||||
]
|
||||
}
|
||||
],
|
||||
"verbs": [
|
||||
"get",
|
||||
"list"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "queryconvert",
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "QueryDataRequest",
|
||||
"version": ""
|
||||
},
|
||||
"scope": "Namespaced",
|
||||
"singularResource": "queryconvert",
|
||||
"verbs": [
|
||||
"create"
|
||||
]
|
||||
}
|
||||
],
|
||||
"version": "v0alpha1"
|
||||
}
|
||||
]`, disco)
|
||||
list, err := client.List(ctx, metav1.ListOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, list.Items, 1, "expected a single connection")
|
||||
require.Equal(t, "test", list.Items[0].GetName(), "with the test uid")
|
||||
|
||||
spec, _, _ := unstructured.NestedMap(list.Items[0].Object, "spec")
|
||||
jj, _ := json.MarshalIndent(spec, "", " ")
|
||||
fmt.Printf("%s\n", string(jj))
|
||||
require.JSONEq(t, `{
|
||||
"access": "proxy",
|
||||
"database": "testdb",
|
||||
"isDefault": true,
|
||||
"jsonData": {
|
||||
"hello": "world"
|
||||
},
|
||||
"readOnly": true,
|
||||
"title": "test",
|
||||
"url": "http://fake.url",
|
||||
"user": "example"
|
||||
}`, string(jj))
|
||||
})
|
||||
|
||||
t.Run("Call subresources", func(t *testing.T) {
|
||||
client := helper.Org1.Admin.ResourceClient(t, schema.GroupVersionResource{
|
||||
Group: "testdata.datasource.grafana.app",
|
||||
Version: "v0alpha1",
|
||||
Resource: "connections",
|
||||
Resource: "datasources",
|
||||
}).Namespace("default")
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -155,7 +119,7 @@ func TestIntegrationTestDatasource(t *testing.T) {
|
|||
raw := apis.DoRequest[any](helper, apis.RequestParams{
|
||||
User: helper.Org1.Admin,
|
||||
Method: "GET",
|
||||
Path: "/apis/testdata.datasource.grafana.app/v0alpha1/namespaces/default/connections/test/resource",
|
||||
Path: "/apis/testdata.datasource.grafana.app/v0alpha1/namespaces/default/datasources/test/resource",
|
||||
}, nil)
|
||||
require.Equal(t, `Hello world from test datasource!`, string(raw.Body))
|
||||
})
|
||||
|
|
|
@ -1,3 +1,27 @@
|
|||
import { generatedAPI } from './endpoints.gen';
|
||||
|
||||
export const shortURLAPIv1alpha1 = generatedAPI.enhanceEndpoints({});
|
||||
export const shortURLAPIv1alpha1 = generatedAPI.enhanceEndpoints({
|
||||
endpoints: {
|
||||
createShortUrl: (endpointDefinition) => {
|
||||
const originalQuery = endpointDefinition.query;
|
||||
if (!originalQuery) {
|
||||
return;
|
||||
}
|
||||
|
||||
endpointDefinition.query = (requestOptions) => {
|
||||
// Ensure metadata exists
|
||||
if (!requestOptions.shortUrl.metadata) {
|
||||
requestOptions.shortUrl.metadata = {};
|
||||
}
|
||||
|
||||
const metadata = requestOptions.shortUrl.metadata;
|
||||
if (!metadata.name && !metadata.generateName) {
|
||||
// GenerateName lets the apiserver create a new uid for the name
|
||||
// This wont be used, the backend will generate a random uid but cannot be blank or will fail.
|
||||
metadata.generateName = 's-';
|
||||
}
|
||||
return originalQuery(requestOptions);
|
||||
};
|
||||
},
|
||||
},
|
||||
});
|
||||
|
|
|
@ -49,7 +49,7 @@ export const createShortLink = async function (path: string) {
|
|||
generatedAPI.endpoints.createShortUrl.initiate({
|
||||
shortUrl: {
|
||||
apiVersion: 'shorturl.grafana.app/v1alpha1',
|
||||
kind: 'Playlist',
|
||||
kind: 'ShortURL',
|
||||
metadata: {},
|
||||
spec: {
|
||||
path: getRelativeURLPath(path),
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
# Codeowners Manifest Scripts
|
||||
|
||||
Scripts for generating and caching CODEOWNERS manifest data.
|
||||
|
||||
Each of these scripts can be run individually if needed, but `index.js` is most useful because it combines them all.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Combined script
|
||||
node index.js # Generate complete manifest with caching
|
||||
|
||||
# Individual scripts
|
||||
node metadata.js # Generate metadata with hashes
|
||||
node raw.js # Generate raw audit data
|
||||
node generate.js # Process raw data into manifest files
|
||||
```
|
||||
|
||||
## Control flow of `index.js`
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[index.js] --> B[metadata.js: Generate new metadata]
|
||||
B --> C{Existing metadata exists?}
|
||||
C -->|No| D[Generate all files]
|
||||
C -->|Yes| E{Hashes match?}
|
||||
E -->|No| D
|
||||
E -->|Yes| F[Skip generation]
|
||||
|
||||
D --> G[raw.js: Generate audit data]
|
||||
G --> H[generate.js: Process into JSON files]
|
||||
H --> I[Save new metadata]
|
||||
I --> J[Complete]
|
||||
|
||||
F --> J
|
||||
|
||||
style F fill:#e1f5fe
|
||||
style J fill:#e8f5e8
|
||||
```
|
||||
|
||||
## Default output
|
||||
|
||||
By default these scripts will write the following files to the `/codeowners-manifest/*` directory.
|
||||
|
||||
- `audit-raw.jsonl` - Raw CODEOWNERS audit data in JSONL format _(for fast stream processing)_
|
||||
- `teams.json` - List of all codeowners _(for validating codeowner names)_
|
||||
- `teams-by-filename.json` - Files mapped to their respective codeowners
|
||||
- `filenames-by-team.json` - Codeowners mapped to their respective files
|
||||
- `metadata.json` - Hashes for cache validation
|
|
@ -0,0 +1,11 @@
|
|||
const CODEOWNERS_MANIFEST_DIR = 'codeowners-manifest';
|
||||
|
||||
module.exports = {
|
||||
CODEOWNERS_FILE_PATH: '.github/CODEOWNERS',
|
||||
CODEOWNERS_MANIFEST_DIR,
|
||||
RAW_AUDIT_JSONL_PATH: `${CODEOWNERS_MANIFEST_DIR}/audit-raw.jsonl`,
|
||||
CODEOWNERS_JSON_PATH: `${CODEOWNERS_MANIFEST_DIR}/teams.json`,
|
||||
CODEOWNERS_BY_FILENAME_JSON_PATH: `${CODEOWNERS_MANIFEST_DIR}/teams-by-filename.json`,
|
||||
FILENAMES_BY_CODEOWNER_JSON_PATH: `${CODEOWNERS_MANIFEST_DIR}/filenames-by-team.json`,
|
||||
METADATA_JSON_PATH: `${CODEOWNERS_MANIFEST_DIR}/metadata.json`,
|
||||
};
|
|
@ -0,0 +1,100 @@
|
|||
#!/usr/bin/env node
|
||||
|
||||
const fs = require('node:fs');
|
||||
const { stat, writeFile } = require('node:fs/promises');
|
||||
const readline = require('node:readline');
|
||||
|
||||
const {
|
||||
RAW_AUDIT_JSONL_PATH,
|
||||
CODEOWNERS_BY_FILENAME_JSON_PATH,
|
||||
FILENAMES_BY_CODEOWNER_JSON_PATH,
|
||||
CODEOWNERS_JSON_PATH,
|
||||
} = require('./constants.js');
|
||||
|
||||
/**
|
||||
* Generate codeowners manifest files from raw audit data
|
||||
* @param {string} rawAuditPath - Path to the raw audit JSONL file
|
||||
* @param {string} codeownersJsonPath - Path to write teams.json
|
||||
* @param {string} codeownersByFilenamePath - Path to write teams-by-filename.json
|
||||
* @param {string} filenamesByCodeownerPath - Path to write filenames-by-team.json
|
||||
*/
|
||||
async function generateCodeownersManifest(
|
||||
rawAuditPath,
|
||||
codeownersJsonPath,
|
||||
codeownersByFilenamePath,
|
||||
filenamesByCodeownerPath
|
||||
) {
|
||||
const hasRawAuditJsonl = await stat(rawAuditPath);
|
||||
if (!hasRawAuditJsonl) {
|
||||
throw new Error(
|
||||
`No raw CODEOWNERS audit JSONL file found at: ${rawAuditPath} ... run "yarn codeowners-manifest:raw"`
|
||||
);
|
||||
}
|
||||
|
||||
const auditFileInput = fs.createReadStream(rawAuditPath);
|
||||
|
||||
const lineReader = readline.createInterface({
|
||||
input: auditFileInput,
|
||||
crlfDelay: Infinity,
|
||||
});
|
||||
|
||||
let codeowners = new Set();
|
||||
let codeownersByFilename = new Map();
|
||||
let filenamesByCodeowner = new Map();
|
||||
|
||||
lineReader.on('error', (error) => {
|
||||
console.error('Error reading file:', error);
|
||||
throw error;
|
||||
});
|
||||
|
||||
lineReader.on('line', (line) => {
|
||||
try {
|
||||
const { path, owners: fileOwners } = JSON.parse(line.toString().trim());
|
||||
|
||||
for (let owner of fileOwners) {
|
||||
codeowners.add(owner);
|
||||
}
|
||||
|
||||
codeownersByFilename.set(path, fileOwners);
|
||||
|
||||
for (let owner of fileOwners) {
|
||||
const filenames = filenamesByCodeowner.get(owner) || [];
|
||||
filenamesByCodeowner.set(owner, filenames.concat(path));
|
||||
}
|
||||
} catch (parseError) {
|
||||
console.error(`Error parsing line: ${line}`, parseError);
|
||||
throw parseError;
|
||||
}
|
||||
});
|
||||
|
||||
await new Promise((resolve) => lineReader.once('close', resolve));
|
||||
|
||||
await Promise.all([
|
||||
writeFile(codeownersJsonPath, JSON.stringify(Array.from(codeowners).sort(), null, 2)),
|
||||
writeFile(codeownersByFilenamePath, JSON.stringify(Object.fromEntries(codeownersByFilename), null, 2)),
|
||||
writeFile(filenamesByCodeownerPath, JSON.stringify(Object.fromEntries(filenamesByCodeowner), null, 2)),
|
||||
]);
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
(async () => {
|
||||
try {
|
||||
console.log(`📋 Generating files ↔ teams manifests from ${RAW_AUDIT_JSONL_PATH} ...`);
|
||||
await generateCodeownersManifest(
|
||||
RAW_AUDIT_JSONL_PATH,
|
||||
CODEOWNERS_JSON_PATH,
|
||||
CODEOWNERS_BY_FILENAME_JSON_PATH,
|
||||
FILENAMES_BY_CODEOWNER_JSON_PATH
|
||||
);
|
||||
console.log('✅ Manifest files generated:');
|
||||
console.log(` • ${CODEOWNERS_JSON_PATH}`);
|
||||
console.log(` • ${CODEOWNERS_BY_FILENAME_JSON_PATH}`);
|
||||
console.log(` • ${FILENAMES_BY_CODEOWNER_JSON_PATH}`);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
process.exit(1);
|
||||
}
|
||||
})();
|
||||
}
|
||||
|
||||
module.exports = { generateCodeownersManifest };
|
|
@ -0,0 +1,101 @@
|
|||
#!/usr/bin/env node
|
||||
|
||||
const { writeFile, readFile, mkdir, access } = require('node:fs/promises');
|
||||
|
||||
const {
|
||||
CODEOWNERS_FILE_PATH,
|
||||
CODEOWNERS_MANIFEST_DIR,
|
||||
RAW_AUDIT_JSONL_PATH,
|
||||
CODEOWNERS_BY_FILENAME_JSON_PATH,
|
||||
FILENAMES_BY_CODEOWNER_JSON_PATH,
|
||||
CODEOWNERS_JSON_PATH,
|
||||
METADATA_JSON_PATH,
|
||||
} = require('./constants.js');
|
||||
const { generateCodeownersManifest } = require('./generate.js');
|
||||
const { generateCodeownersMetadata } = require('./metadata.js');
|
||||
const { generateCodeownersRawAudit } = require('./raw.js');
|
||||
|
||||
/**
|
||||
* Generate complete codeowners manifest including raw audit, metadata, and processed files
|
||||
* @param {string} codeownersFilePath - Path to CODEOWNERS file
|
||||
* @param {string} manifestDir - Directory for manifest files
|
||||
* @param {string} rawAuditPath - Path for raw audit JSONL file
|
||||
* @param {string} codeownersJsonPath - Path for teams.json
|
||||
* @param {string} codeownersByFilenamePath - Path for teams-by-filename.json
|
||||
* @param {string} filenamesByCodeownerPath - Path for filenames-by-team.json
|
||||
* @param {string} metadataPath - Path for metadata.json
|
||||
*/
|
||||
async function generateCodeownersManifestComplete(
|
||||
codeownersFilePath,
|
||||
manifestDir,
|
||||
rawAuditPath,
|
||||
codeownersJsonPath,
|
||||
codeownersByFilenamePath,
|
||||
filenamesByCodeownerPath,
|
||||
metadataPath
|
||||
) {
|
||||
try {
|
||||
await access(manifestDir);
|
||||
} catch (error) {
|
||||
await mkdir(manifestDir, { recursive: true });
|
||||
}
|
||||
|
||||
const newMetadata = generateCodeownersMetadata(codeownersFilePath, manifestDir, 'metadata.json');
|
||||
|
||||
let isCacheUpToDate = false;
|
||||
try {
|
||||
const existingMetadata = JSON.parse(await readFile(metadataPath, 'utf8'));
|
||||
if (
|
||||
existingMetadata.filesHash === newMetadata.filesHash &&
|
||||
existingMetadata.codeownersHash === newMetadata.codeownersHash
|
||||
) {
|
||||
isCacheUpToDate = true;
|
||||
}
|
||||
} catch (error) {
|
||||
isCacheUpToDate = false;
|
||||
}
|
||||
|
||||
if (!isCacheUpToDate) {
|
||||
await generateCodeownersRawAudit(codeownersFilePath, rawAuditPath);
|
||||
await generateCodeownersManifest(
|
||||
rawAuditPath,
|
||||
codeownersJsonPath,
|
||||
codeownersByFilenamePath,
|
||||
filenamesByCodeownerPath
|
||||
);
|
||||
await writeFile(metadataPath, JSON.stringify(newMetadata, null, 2), 'utf8');
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
(async () => {
|
||||
try {
|
||||
console.log('📋 Generating complete codeowners manifest...');
|
||||
|
||||
const wasGenerated = await generateCodeownersManifestComplete(
|
||||
CODEOWNERS_FILE_PATH,
|
||||
CODEOWNERS_MANIFEST_DIR,
|
||||
RAW_AUDIT_JSONL_PATH,
|
||||
CODEOWNERS_JSON_PATH,
|
||||
CODEOWNERS_BY_FILENAME_JSON_PATH,
|
||||
FILENAMES_BY_CODEOWNER_JSON_PATH,
|
||||
METADATA_JSON_PATH
|
||||
);
|
||||
|
||||
if (wasGenerated) {
|
||||
console.log('✅ Complete manifest generated:');
|
||||
console.log(` • ${CODEOWNERS_MANIFEST_DIR}/`);
|
||||
} else {
|
||||
console.log('✅ Manifest up-to-date, skipped generation');
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('❌ Error generating codeowners manifest:', e.message);
|
||||
process.exit(1);
|
||||
}
|
||||
})();
|
||||
}
|
||||
|
||||
module.exports = { generateCodeownersManifestComplete };
|
|
@ -0,0 +1,61 @@
|
|||
#!/usr/bin/env node
|
||||
|
||||
const { execSync } = require('node:child_process');
|
||||
const { writeFile, mkdir, access } = require('node:fs/promises');
|
||||
|
||||
const { CODEOWNERS_FILE_PATH, CODEOWNERS_MANIFEST_DIR, METADATA_JSON_PATH } = require('./constants.js');
|
||||
|
||||
/**
|
||||
* @typedef {Object} CodeownersMetadata
|
||||
* @property {string} generatedAt - ISO timestamp when metadata was generated
|
||||
* @property {string} filesHash - SHA-256 hash of all repository files
|
||||
* @property {string} codeownersHash - SHA-256 hash of CODEOWNERS file
|
||||
*/
|
||||
|
||||
/**
|
||||
* Generate codeowners metadata for caching
|
||||
* @param {string} codeownersFilePath - Path to CODEOWNERS file
|
||||
* @param {string} manifestDir - Directory for manifest files
|
||||
* @param {string} metadataFilename - Filename for metadata file
|
||||
* @returns {CodeownersMetadata} Metadata object with hashes
|
||||
*/
|
||||
function generateCodeownersMetadata(codeownersFilePath, manifestDir, metadataFilename) {
|
||||
const [filesHash] = execSync('git ls-files --cached --others --exclude-standard | sort | sha256sum', {
|
||||
encoding: 'utf8',
|
||||
})
|
||||
.trim()
|
||||
.split(' ');
|
||||
|
||||
const [codeownersHash] = execSync(`sha256sum "${codeownersFilePath}"`, { encoding: 'utf8' }).trim().split(' ');
|
||||
|
||||
return {
|
||||
generatedAt: new Date().toISOString(),
|
||||
filesHash,
|
||||
codeownersHash,
|
||||
};
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
(async () => {
|
||||
try {
|
||||
console.log('⚙️ Generating codeowners-manifest metadata ...');
|
||||
|
||||
try {
|
||||
await access(CODEOWNERS_MANIFEST_DIR);
|
||||
} catch (error) {
|
||||
await mkdir(CODEOWNERS_MANIFEST_DIR, { recursive: true });
|
||||
}
|
||||
|
||||
const metadata = generateCodeownersMetadata(CODEOWNERS_FILE_PATH, CODEOWNERS_MANIFEST_DIR, METADATA_JSON_PATH);
|
||||
|
||||
await writeFile(METADATA_JSON_PATH, JSON.stringify(metadata, null, 2), 'utf8');
|
||||
console.log('✅ Metadata generated:');
|
||||
console.log(` • ${METADATA_JSON_PATH}`);
|
||||
} catch (error) {
|
||||
console.error('❌ Error generating codeowners metadata:', error.message);
|
||||
process.exit(1);
|
||||
}
|
||||
})();
|
||||
}
|
||||
|
||||
module.exports = { generateCodeownersMetadata };
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue