Merge branch 'main' into api-client/headers

Clarity-89 2025-10-02 17:41:39 +03:00
commit b3ff62a46f
113 changed files with 2122 additions and 697 deletions

View File

@ -13,6 +13,7 @@ import (
// API errors that we need to convey after parsing real GH errors (or faking them).
var (
ErrResourceNotFound = errors.New("the resource does not exist")
ErrUnauthorized = errors.New("unauthorized")
//lint:ignore ST1005 this is not punctuation
ErrServiceUnavailable = apierrors.NewServiceUnavailable("github is unavailable")
ErrTooManyItems = errors.New("maximum number of items exceeded")

View File

@ -199,6 +199,9 @@ func (r *githubClient) DeleteWebhook(ctx context.Context, owner, repository stri
if ghErr.Response.StatusCode == http.StatusNotFound {
return ErrResourceNotFound
}
if ghErr.Response.StatusCode == http.StatusUnauthorized || ghErr.Response.StatusCode == http.StatusForbidden {
return ErrUnauthorized
}
return err
}
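
With both status codes mapped to sentinel errors, callers can branch with `errors.Is` instead of inspecting HTTP responses. A minimal sketch of the caller-side pattern, mirroring the repository cleanup code later in this diff:

```go
// Sketch: tolerate "already gone" and "access revoked" outcomes when
// cleaning up a webhook; anything else is a real failure.
err := gh.DeleteWebhook(ctx, owner, repo, id)
switch {
case err == nil,
	errors.Is(err, ErrResourceNotFound),
	errors.Is(err, ErrUnauthorized):
	return nil // nothing left to do, or nothing we are allowed to do
default:
	return fmt.Errorf("delete webhook: %w", err)
}
```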

View File

@ -975,6 +975,27 @@ func TestGithubClient_DeleteWebhook(t *testing.T) {
webhookID: 789,
wantErr: ErrServiceUnavailable,
},
{
name: "unauthorized to delete the webhook",
mockHandler: mockhub.NewMockedHTTPClient(
mockhub.WithRequestMatchHandler(
mockhub.DeleteReposHooksByOwnerByRepoByHookId,
http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusUnauthorized)
require.NoError(t, json.NewEncoder(w).Encode(github.ErrorResponse{
Response: &http.Response{
StatusCode: http.StatusUnauthorized,
},
Message: "401 bad credentials",
}))
}),
),
),
owner: "test-owner",
repository: "test-repo",
webhookID: 789,
wantErr: ErrUnauthorized,
},
{
name: "other error",
mockHandler: mockhub.NewMockedHTTPClient(

View File

@ -274,11 +274,15 @@ func (r *githubWebhookRepository) deleteWebhook(ctx context.Context) error {
id := r.config.Status.Webhook.ID
err := r.gh.DeleteWebhook(ctx, r.owner, r.repo, id)
if err != nil && !errors.Is(err, ErrResourceNotFound) {
if err != nil && !errors.Is(err, ErrResourceNotFound) && !errors.Is(err, ErrUnauthorized) {
return fmt.Errorf("delete webhook: %w", err)
}
if errors.Is(err, ErrResourceNotFound) {
logger.Info("webhook does not exist", "url", r.config.Status.Webhook.URL, "id", id)
logger.Warn("webhook no longer exists", "url", r.config.Status.Webhook.URL, "id", id)
return nil
}
if errors.Is(err, ErrUnauthorized) {
logger.Warn("webhook deletion failed. no longer authorized to delete this webhook", "url", r.config.Status.Webhook.URL, "id", id)
return nil
}

View File

@ -1565,6 +1565,32 @@ func TestGitHubRepository_OnDelete(t *testing.T) {
// We don't return an error if the webhook is already gone
expectedError: nil,
},
{
name: "unauthorized to delete the webhook",
setupMock: func(m *MockClient) {
m.On("DeleteWebhook", mock.Anything, "grafana", "grafana", int64(123)).
Return(ErrUnauthorized)
},
config: &provisioning.Repository{
ObjectMeta: metav1.ObjectMeta{
Name: "test-repo",
},
Spec: provisioning.RepositorySpec{
GitHub: &provisioning.GitHubRepositoryConfig{
Branch: "main",
},
},
Status: provisioning.RepositoryStatus{
Webhook: &provisioning.WebhookStatus{
ID: 123,
URL: "https://example.com/webhook",
},
},
},
webhookURL: "https://example.com/webhook",
// We don't return an error if access to the webhook is revoked
expectedError: nil,
},
{
name: "no webhook URL provided",
setupMock: func(_ *MockClient) {},

View File

@ -75,11 +75,6 @@ func ValidateRepository(repo Repository) field.ErrorList {
"The target type is required when sync is enabled"))
}
if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < 10 {
list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"),
cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", 10)))
}
// Reserved names (for now)
reserved := []string{"classic", "sql", "SQL", "plugins", "legacy", "new", "job", "github", "s3", "gcs", "file", "new", "create", "update", "delete"}
if slices.Contains(reserved, cfg.Name) {

View File

@ -74,28 +74,6 @@ func TestValidateRepository(t *testing.T) {
require.Contains(t, errors.ToAggregate().Error(), "spec.sync.target: Required value")
},
},
{
name: "sync interval too low",
repository: func() *MockRepository {
m := NewMockRepository(t)
m.On("Config").Return(&provisioning.Repository{
Spec: provisioning.RepositorySpec{
Title: "Test Repo",
Sync: provisioning.SyncOptions{
Enabled: true,
Target: "test",
IntervalSeconds: 5,
},
},
})
m.On("Validate").Return(field.ErrorList{})
return m
}(),
expectedErrs: 1,
validateError: func(t *testing.T, errors field.ErrorList) {
require.Contains(t, errors.ToAggregate().Error(), "spec.sync.intervalSeconds: Invalid value")
},
},
{
name: "reserved name",
repository: func() *MockRepository {
@ -191,11 +169,10 @@ func TestValidateRepository(t *testing.T) {
m.On("Validate").Return(field.ErrorList{})
return m
}(),
expectedErrs: 4, // Updated from 3 to 4 to match actual errors:
expectedErrs: 3,
// 1. missing title
// 2. sync target missing
// 3. sync interval too low
// 4. reserved name
// 3. reserved name
},
{
name: "branch workflow for non-github repository",
@ -447,18 +424,6 @@ func TestFromFieldError(t *testing.T) {
expectedType: metav1.CauseTypeFieldValueRequired,
expectedDetail: "a repository title must be given",
},
{
name: "invalid field error",
fieldError: &field.Error{
Type: field.ErrorTypeInvalid,
Field: "spec.sync.intervalSeconds",
Detail: "Interval must be at least 10 seconds",
},
expectedCode: http.StatusBadRequest,
expectedField: "spec.sync.intervalSeconds",
expectedType: metav1.CauseTypeFieldValueInvalid,
expectedDetail: "Interval must be at least 10 seconds",
},
{
name: "not supported field error",
fieldError: &field.Error{

View File

@ -2229,3 +2229,8 @@ allowed_targets = instance|folder
# Whether image rendering is allowed for dashboard previews.
# Requires image rendering service to be configured.
allow_image_rendering = true
# The minimum sync interval that can be set for a repository. This is how often the controller
# will check whether there have been any changes to the repository not propagated by a webhook.
# The minimum value is 10 seconds.
min_sync_interval = 10s
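
The value is parsed as a Go duration (see the `MustDuration` call later in this diff), so any unit Go accepts works. A hypothetical override in `custom.ini` that raises the floor, causing repositories with a shorter `spec.sync.intervalSeconds` to fail validation:

```ini
[provisioning]
# Hypothetical: reject sync intervals shorter than one minute.
min_sync_interval = 1m
```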

View File

@ -223,7 +223,7 @@ Team provisioning requires `group_sync_enabled = true` in the SCIM configuration
{{< /admonition >}}
{{< admonition type="warning" >}}
Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider.
Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider. Optionally, you can disable SCIM group sync to allow manual deletion of teams.
{{< /admonition >}}
For detailed configuration steps specific to the identity provider, see:

View File

@ -598,7 +598,7 @@
"auth-validator",
"config-loader",
"config-writer",
"metrics-collector",
"metrics-collector-last-span",
"log-writer",
"log-reader",
"event-publisher",

View File

@ -2,11 +2,6 @@ import { test, expect } from '@grafana/plugin-e2e';
import longTraceResponse from '../fixtures/long-trace-response.json';
// this test requires a larger viewport
test.use({
viewport: { width: 1280, height: 1080 },
});
test.describe(
'Trace view',
{
@ -33,7 +28,7 @@ test.describe(
await datasourceList.getByText('gdev-jaeger').click();
// Check that gdev-jaeger is visible in the query editor
await expect(page.getByText('gdev-jaeger')).toBeVisible();
await expect(page.getByTestId('query-editor-row').getByText('(gdev-jaeger)')).toBeVisible();
// Type the query
const queryField = page
@ -44,14 +39,22 @@ test.describe(
// Use Shift+Enter to execute the query
await queryField.press('Shift+Enter');
// Get the initial count of span bars
const initialSpanBars = page.getByTestId(selectors.components.TraceViewer.spanBar);
const initialSpanBarCount = await initialSpanBars.count();
// Wait for the trace viewer to be ready
await expect(page.getByRole('switch', { name: /api\-gateway GET/ })).toBeVisible();
await initialSpanBars.last().scrollIntoViewIfNeeded();
await expect
.poll(async () => await page.getByTestId(selectors.components.TraceViewer.spanBar).count())
.toBeGreaterThan(initialSpanBarCount);
// Note the scrolling element is actually the first child of the scroll view, but we can use the scroll wheel on this anyway
const scrollEl = page.getByTestId(selectors.pages.Explore.General.scrollView);
// Assert that the last span is not visible in the page - it should be lazily rendered as the user scrolls
const lastSpan = page.getByRole('switch', { name: /metrics\-collector\-last\-span GET/ });
await expect(lastSpan).not.toBeVisible();
// Scroll until the "metrics-collector-last-span GET" switch is visible
await expect(async () => {
await scrollEl.hover();
await page.mouse.wheel(0, 1000);
await expect(lastSpan).toBeVisible({ timeout: 1 });
}).toPass();
});
}
);

View File

@ -885,6 +885,11 @@ export interface FeatureToggles {
*/
alertingJiraIntegration?: boolean;
/**
*
* @default true
*/
alertingUseNewSimplifiedRoutingHashAlgorithm?: boolean;
/**
* Use the scopes navigation endpoint instead of the dashboardbindings endpoint
*/
useScopesNavigationEndpoint?: boolean;

View File

@ -23,9 +23,9 @@ const qualifiedNameFmt string = "^(" + qnameCharFmt + qnameExtCharFmt + "*)?" +
const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
const alphaCharFmt string = "[A-Za-z]"
const resourceCharFmt string = "[A-Za-z0-9]" // alpha numeric
const resourceCharFmt string = "[A-Za-z0-9-]" // alpha numeric plus dashes
const resourceFmt string = "^" + alphaCharFmt + resourceCharFmt + "*$"
const resourceErrMsg string = "must consist of alphanumeric characters"
const resourceErrMsg string = "must consist of alphanumeric characters and dashes, and must start with an alphabetic character"
var (
grafanaNameRegexp = regexp.MustCompile(grafanaNameFmt).MatchString
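
A quick sketch of what the relaxed pattern accepts and rejects, using the regex verbatim from the change above:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// ^[A-Za-z][A-Za-z0-9-]*$ : alphabetic first character, then any mix of
	// alphanumerics and dashes (a trailing dash is allowed).
	valid := regexp.MustCompile(`^[A-Za-z][A-Za-z0-9-]*$`).MatchString

	fmt.Println(valid("hello-world"))  // true
	fmt.Println(valid("hello-world-")) // true
	fmt.Println(valid("_bad_input"))   // false: must start with a letter
	fmt.Println(valid("hello world"))  // false: spaces are not allowed
}
```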

View File

@ -198,16 +198,17 @@ func TestValidation(t *testing.T) {
"folders",
"folders123",
"aaa",
"hello-world",
"hello-world-",
},
}, {
name: "bad input",
expect: []string{
"resource must consist of alphanumeric characters (e.g. 'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9]*$')",
"resource must consist of alphanumeric characters and dashes, and must start with an alphabetic character (e.g. 'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9-]*$')",
},
input: []string{
"_bad_input",
"hello world",
"hello-world",
"hello!",
"hello~",
"hello ",

View File

@ -120,8 +120,8 @@ func (f *finalizer) processExistingItems(
Group: item.Group,
Resource: item.Resource,
})
logger.Error("error getting client for resource", "resource", item.Resource, "error", err)
if err != nil {
logger.Error("error getting client for resource", "resource", item.Resource, "error", err)
return count, err
}

View File

@ -92,6 +92,7 @@ type APIBuilder struct {
allowedTargets []provisioning.SyncTargetType
allowImageRendering bool
minSyncInterval time.Duration
features featuremgmt.FeatureToggles
usageStats usagestats.Service
@ -144,6 +145,7 @@ func NewAPIBuilder(
allowedTargets []provisioning.SyncTargetType,
restConfigGetter func(context.Context) (*clientrest.Config, error),
allowImageRendering bool,
minSyncInterval time.Duration,
registry prometheus.Registerer,
newStandaloneClientFactoryFunc func(loopbackConfigProvider apiserver.RestConfigProvider) resources.ClientFactory, // optional, only used for standalone apiserver
) *APIBuilder {
@ -156,6 +158,11 @@ func NewAPIBuilder(
parsers := resources.NewParserFactory(clients)
resourceLister := resources.NewResourceListerForMigrations(unified, legacyMigrator, storageStatus)
// do not allow the min sync interval to be less than 10 seconds
if minSyncInterval <= 10*time.Second {
minSyncInterval = 10 * time.Second
}
b := &APIBuilder{
onlyApiServer: onlyApiServer,
tracer: tracer,
@ -175,6 +182,7 @@ func NewAPIBuilder(
allowedTargets: allowedTargets,
restConfigGetter: restConfigGetter,
allowImageRendering: allowImageRendering,
minSyncInterval: minSyncInterval,
registry: registry,
}
@ -261,6 +269,7 @@ func RegisterAPIService(
allowedTargets,
nil, // will use loopback instead
cfg.ProvisioningAllowImageRendering,
cfg.ProvisioningMinSyncInterval,
reg,
nil,
)
@ -587,6 +596,11 @@ func (b *APIBuilder) Validate(ctx context.Context, a admission.Attributes, o adm
"sync target is not supported"))
}
if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < int64(b.minSyncInterval.Seconds()) {
list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"),
cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", int64(b.minSyncInterval.Seconds()))))
}
if !b.allowImageRendering && cfg.Spec.GitHub != nil && cfg.Spec.GitHub.GenerateDashboardPreviews {
list = append(list,
field.Invalid(field.NewPath("spec", "generateDashboardPreviews"),

View File

@ -0,0 +1,114 @@
package provisioning
import (
"context"
"testing"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestAPIBuilderValidate(t *testing.T) {
factory := repository.NewMockFactory(t)
mockRepo := repository.NewMockConfigRepository(t)
mockRepo.EXPECT().Validate().Return(nil)
factory.EXPECT().Build(mock.Anything, mock.Anything).Return(mockRepo, nil)
b := &APIBuilder{
repoFactory: factory,
allowedTargets: []v0alpha1.SyncTargetType{v0alpha1.SyncTargetTypeFolder},
allowImageRendering: false,
minSyncInterval: 30 * time.Second,
}
t.Run("min sync interval is less than 10 seconds", func(t *testing.T) {
cfg := &v0alpha1.Repository{
Spec: v0alpha1.RepositorySpec{
Title: "repo",
Type: v0alpha1.GitHubRepositoryType,
Sync: v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeFolder, IntervalSeconds: 5},
},
}
mockRepo.EXPECT().Config().Return(cfg)
obj := newRepoObj("repo1", "default", cfg.Spec, v0alpha1.RepositoryStatus{})
err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil)
require.Error(t, err)
require.True(t, apierrors.IsInvalid(err))
})
t.Run("image rendering is not enabled", func(t *testing.T) {
cfg2 := &v0alpha1.Repository{
Spec: v0alpha1.RepositorySpec{
Title: "repo",
Type: v0alpha1.GitHubRepositoryType,
Sync: v0alpha1.SyncOptions{Enabled: false, Target: v0alpha1.SyncTargetTypeFolder},
GitHub: &v0alpha1.GitHubRepositoryConfig{URL: "https://github.com/acme/repo", Branch: "main", GenerateDashboardPreviews: true},
},
}
mockRepo.EXPECT().Config().Return(cfg2)
obj := newRepoObj("repo2", "default", cfg2.Spec, v0alpha1.RepositoryStatus{})
err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil)
require.Error(t, err)
require.True(t, apierrors.IsInvalid(err))
})
t.Run("sync target is not supported", func(t *testing.T) {
cfg3 := &v0alpha1.Repository{
Spec: v0alpha1.RepositorySpec{
Title: "repo",
Type: v0alpha1.GitHubRepositoryType,
Sync: v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeInstance},
},
}
mockRepo.EXPECT().Config().Return(cfg3)
obj := newRepoObj("repo3", "default", cfg3.Spec, v0alpha1.RepositoryStatus{})
err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil)
require.Error(t, err)
require.True(t, apierrors.IsInvalid(err))
})
}
func newRepoObj(name string, ns string, spec v0alpha1.RepositorySpec, status v0alpha1.RepositoryStatus) *v0alpha1.Repository {
return &v0alpha1.Repository{
TypeMeta: metav1.TypeMeta{APIVersion: v0alpha1.APIVERSION, Kind: "Repository"},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
Spec: spec,
Status: status,
}
}
func newAttributes(obj, old runtime.Object, op admission.Operation) admission.Attributes {
return admission.NewAttributesRecord(
obj,
old,
v0alpha1.RepositoryResourceInfo.GroupVersionKind(),
"default",
func() string {
if obj != nil {
return obj.(*v0alpha1.Repository).Name
}
if old != nil {
return old.(*v0alpha1.Repository).Name
}
return ""
}(),
v0alpha1.RepositoryResourceInfo.GroupVersionResource(),
"",
op,
nil,
false,
&user.DefaultInfo{},
)
}

View File

@ -187,7 +187,7 @@ func (s *ModuleServer) Run() error {
if err != nil {
return nil, err
}
return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig)
return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig, s.httpServerRouter)
})
m.RegisterModule(modules.ZanzanaServer, func() (services.Service, error) {

View File

@ -1520,6 +1520,16 @@ var (
FrontendOnly: true,
HideFromDocs: true,
},
{
Name: "alertingUseNewSimplifiedRoutingHashAlgorithm",
Description: "",
Stage: FeatureStagePublicPreview,
Owner: grafanaAlertingSquad,
HideFromAdminPage: true,
HideFromDocs: true,
RequiresRestart: true,
Expression: "true",
},
{
Name: "useScopesNavigationEndpoint",
Description: "Use the scopes navigation endpoint instead of the dashboardbindings endpoint",

View File

@ -198,6 +198,7 @@ fetchRulesUsingPost,experimental,@grafana/alerting-squad,false,false,false
newLogsPanel,experimental,@grafana/observability-logs,false,false,true
grafanaconThemes,GA,@grafana/grafana-frontend-platform,false,true,false
alertingJiraIntegration,experimental,@grafana/alerting-squad,false,false,true
alertingUseNewSimplifiedRoutingHashAlgorithm,preview,@grafana/alerting-squad,false,true,false
useScopesNavigationEndpoint,experimental,@grafana/grafana-frontend-platform,false,false,true
scopeSearchAllLevels,experimental,@grafana/grafana-frontend-platform,false,false,false
alertingRuleVersionHistoryRestore,GA,@grafana/alerting-squad,false,false,true


View File

@ -803,6 +803,9 @@ const (
// Enables the new Jira integration for contact points in cloud alert managers.
FlagAlertingJiraIntegration = "alertingJiraIntegration"
// FlagAlertingUseNewSimplifiedRoutingHashAlgorithm
FlagAlertingUseNewSimplifiedRoutingHashAlgorithm = "alertingUseNewSimplifiedRoutingHashAlgorithm"
// FlagUseScopesNavigationEndpoint
// Use the scopes navigation endpoint instead of the dashboardbindings endpoint
FlagUseScopesNavigationEndpoint = "useScopesNavigationEndpoint"

View File

@ -610,6 +610,46 @@
"expression": "true"
}
},
{
"metadata": {
"name": "alertingUseNewSimplifiedRoutingHashAlgorithm",
"resourceVersion": "1759339813575",
"creationTimestamp": "2025-10-01T17:28:42Z",
"deletionTimestamp": "2025-10-01T17:29:29Z",
"annotations": {
"grafana.app/updatedTimestamp": "2025-10-01 17:30:13.575464 +0000 UTC"
}
},
"spec": {
"description": "",
"stage": "preview",
"codeowner": "@grafana/alerting-squad",
"requiresRestart": true,
"hideFromAdminPage": true,
"hideFromDocs": true,
"expression": "true"
}
},
{
"metadata": {
"name": "alertingUseOldSimplifiedRoutingHashAlgorithm",
"resourceVersion": "1759339782639",
"creationTimestamp": "2025-10-01T17:29:29Z",
"deletionTimestamp": "2025-10-01T17:30:13Z",
"annotations": {
"grafana.app/updatedTimestamp": "2025-10-01 17:29:42.63941 +0000 UTC"
}
},
"spec": {
"description": "",
"stage": "deprecated",
"codeowner": "@grafana/alerting-squad",
"requiresRestart": true,
"hideFromAdminPage": true,
"hideFromDocs": true,
"expression": "false"
}
},
{
"metadata": {
"name": "alertmanagerRemotePrimary",

View File

@ -291,7 +291,8 @@ func TestAlertmanagerAutogenConfig(t *testing.T) {
1: {AlertmanagerConfiguration: validConfig, OrgID: 1},
2: {AlertmanagerConfiguration: validConfigWithoutAutogen, OrgID: 2},
}
sut.mam = createMultiOrgAlertmanager(t, configs)
ft := featuremgmt.WithFeatures(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm)
sut.mam = createMultiOrgAlertmanager(t, configs, withAMFeatureToggles(ft))
return sut, configs
}
@ -577,9 +578,29 @@ func createSut(t *testing.T) AlertmanagerSrv {
}
}
func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration) *notifier.MultiOrgAlertmanager {
type createMultiOrgAMOptions struct {
featureToggles featuremgmt.FeatureToggles
}
type createMultiOrgAMOptionsFunc func(*createMultiOrgAMOptions)
func withAMFeatureToggles(toggles featuremgmt.FeatureToggles) createMultiOrgAMOptionsFunc {
return func(opts *createMultiOrgAMOptions) {
opts.featureToggles = toggles
}
}
func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration, opts ...createMultiOrgAMOptionsFunc) *notifier.MultiOrgAlertmanager {
t.Helper()
options := createMultiOrgAMOptions{
featureToggles: featuremgmt.WithFeatures(),
}
for _, opt := range opts {
opt(&options)
}
configStore := notifier.NewFakeConfigStore(t, configs)
orgStore := notifier.NewFakeOrgStore(t, []int64{1, 2, 3})
provStore := ngfakes.NewFakeProvisioningStore()
@ -610,7 +631,7 @@ func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertC
ngfakes.NewFakeReceiverPermissionsService(),
log.New("testlogger"),
secretsService,
featuremgmt.WithManager(),
options.featureToggles,
nil,
)
require.NoError(t, err)

View File

@ -113,7 +113,7 @@ func (srv TestingApiSrv) RouteTestGrafanaRuleConfig(c *contextmodel.ReqContext,
now,
rule,
results,
state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder),
state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder, srv.featureManager),
nil,
)

View File

@ -8,6 +8,7 @@ import (
"unsafe"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/prometheus/common/model"
)
@ -102,12 +103,12 @@ func (s *NotificationSettings) Validate() error {
// - AutogeneratedRouteLabel: "true"
// - AutogeneratedRouteReceiverNameLabel: Receiver
// - AutogeneratedRouteSettingsHashLabel: Fingerprint (if the NotificationSettings are not all default)
func (s *NotificationSettings) ToLabels() data.Labels {
func (s *NotificationSettings) ToLabels(features featuremgmt.FeatureToggles) data.Labels {
result := make(data.Labels, 3)
result[AutogeneratedRouteLabel] = "true"
result[AutogeneratedRouteReceiverNameLabel] = s.Receiver
if !s.IsAllDefault() {
result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint().String()
result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint(features).String()
}
return result
}
@ -160,7 +161,7 @@ func NewDefaultNotificationSettings(receiver string) NotificationSettings {
// Fingerprint calculates a hash value to uniquely identify a NotificationSettings by its attributes.
// The hash is calculated by concatenating the strings and durations of the NotificationSettings attributes
// and using an invalid UTF-8 sequence as a separator.
func (s *NotificationSettings) Fingerprint() data.Fingerprint {
func (s *NotificationSettings) Fingerprint(features featuremgmt.FeatureToggles) data.Fingerprint {
h := fnv.New64()
tmp := make([]byte, 8)
@ -192,7 +193,10 @@ func (s *NotificationSettings) Fingerprint() data.Fingerprint {
}
// Add a separator between the time intervals to avoid collisions
// when all settings are the same including interval names except for the interval type (mute vs active).
// Use new algorithm by default, unless feature flag is explicitly disabled
if features == nil || features.IsEnabledGlobally(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm) {
_, _ = h.Write([]byte{255})
}
for _, interval := range s.ActiveTimeIntervals {
writeString(interval)
}
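
The boundary byte matters because both interval lists are written through the same helper. A minimal sketch of the collision being avoided, simplified from the real `writeString` helper but using `hash/fnv` as the real `Fingerprint` does:

```go
// Without the boundary byte, the name "x" in ActiveTimeIntervals hashes
// identically to the name "x" in MuteTimeIntervals.
func hashIntervals(active, mute []string, withBoundary bool) uint64 {
	h := fnv.New64()
	for _, s := range active {
		_, _ = h.Write([]byte(s))
		_, _ = h.Write([]byte{255}) // per-string terminator (invalid UTF-8)
	}
	if withBoundary {
		_, _ = h.Write([]byte{255}) // boundary between active and mute lists
	}
	for _, s := range mute {
		_, _ = h.Write([]byte(s))
		_, _ = h.Write([]byte{255})
	}
	return h.Sum64()
}

// hashIntervals([]string{"x"}, nil, false) == hashIntervals(nil, []string{"x"}, false)
// hashIntervals([]string{"x"}, nil, true)  != hashIntervals(nil, []string{"x"}, true)
```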

View File

@ -195,7 +195,7 @@ func TestNotificationSettingsLabels(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
labels := tt.notificationSettings.ToLabels()
labels := tt.notificationSettings.ToLabels(nil)
require.Equal(t, tt.labels, labels)
})
}
@ -219,7 +219,7 @@ func TestNotificationSettings_TimeIntervals(t *testing.T) {
ActiveTimeIntervals: []string{timeInterval},
}
require.NotEqual(t, activeSettings.Fingerprint(), muteSettings.Fingerprint())
require.NotEqual(t, activeSettings.Fingerprint(nil), muteSettings.Fingerprint(nil))
}
func TestNormalizedGroupBy(t *testing.T) {

View File

@ -220,7 +220,7 @@ func (ng *AlertNG) init() error {
Timeout: ng.Cfg.UnifiedAlerting.RemoteAlertmanager.Timeout,
}
autogenFn := func(ctx context.Context, logger log.Logger, orgID int64, cfg *definitions.PostableApiAlertingConfig, skipInvalid bool) error {
return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid)
return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid, ng.FeatureToggles)
}
// This function will be used by the MOA to create new Alertmanagers.

View File

@ -56,6 +56,7 @@ type alertmanager struct {
DefaultConfiguration string
decryptFn alertingNotify.GetDecryptedValueFn
crypto Crypto
features featuremgmt.FeatureToggles
}
// maintenanceOptions represent the options for components that need maintenance on a frequency within the Alertmanager.
@ -155,6 +156,7 @@ func NewAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A
logger: l.New("component", "alertmanager", opts.TenantKey, opts.TenantID), // similar to what the base does
decryptFn: decryptFn,
crypto: crypto,
features: featureToggles,
}
return am, nil
@ -344,7 +346,7 @@ func (am *alertmanager) applyConfig(ctx context.Context, cfg *apimodels.Postable
templates := alertingNotify.PostableAPITemplatesToTemplateDefinitions(cfg.GetMergedTemplateDefinitions())
// Now add autogenerated config to the route.
err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid)
err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid, am.features)
if err != nil {
return false, err
}

View File

@ -133,7 +133,7 @@ func (moa *MultiOrgAlertmanager) GetAlertmanagerConfiguration(ctx context.Contex
// Otherwise, broken settings (e.g. a receiver that doesn't exist) will cause the config returned here to be
// different than the config currently in-use.
// TODO: Preferably, we'd be getting the config directly from the in-memory AM so adding the autogen config would not be necessary.
err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true)
err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true, moa.featureManager)
if err != nil {
return definitions.GettableUserConfig{}, err
}

View File

@ -12,6 +12,7 @@ import (
"golang.org/x/exp/maps"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
"github.com/grafana/grafana/pkg/services/ngalert/models"
)
@ -22,8 +23,8 @@ type autogenRuleStore interface {
// AddAutogenConfig creates the autogenerated configuration and adds it to the given apiAlertingConfig.
// If skipInvalid is true, then invalid notification settings are skipped, otherwise an error is returned.
func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) error {
autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid)
func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) error {
autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid, features)
if err != nil {
return err
}
@ -39,7 +40,7 @@ func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store
// newAutogeneratedRoute creates a new autogenerated route based on the notification settings for the given org.
// cfg is used to construct the settings validator and to ensure we create a dedicated route for each receiver.
// skipInvalid is used to skip invalid settings instead of returning an error.
func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) (autogeneratedRoute, error) {
func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) (autogeneratedRoute, error) {
settings, err := store.ListNotificationSettings(ctx, models.ListNotificationSettingsQuery{OrgID: orgId})
if err != nil {
return autogeneratedRoute{}, fmt.Errorf("failed to list alert rules: %w", err)
@ -50,7 +51,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s
// contact point even if no rules are using it. This will prevent race conditions between AM sync and rule sync.
for _, receiver := range cfg.GetReceivers() {
setting := models.NewDefaultNotificationSettings(receiver.GetName())
fp := setting.Fingerprint()
fp := setting.Fingerprint(features)
notificationSettings[fp] = setting
}
@ -65,7 +66,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s
}
return autogeneratedRoute{}, fmt.Errorf("invalid notification settings for rule %s: %w", ruleKey.UID, err)
}
fp := setting.Fingerprint()
fp := setting.Fingerprint(features)
// Keep only unique settings.
if _, ok := notificationSettings[fp]; ok {
continue

View File

@ -290,7 +290,7 @@ func TestAddAutogenConfig(t *testing.T) {
store.notificationSettings[orgId][models.AlertRuleKey{OrgID: orgId, UID: util.GenerateShortUID()}] = []models.NotificationSettings{setting}
}
err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid)
err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid, nil)
if tt.expErrorContains != "" {
require.Error(t, err)
require.ErrorContains(t, err, tt.expErrorContains)

View File

@ -471,7 +471,7 @@ func (a *alertRule) evaluate(ctx context.Context, e *Evaluation, span trace.Span
e.scheduledAt,
e.rule,
results,
state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder),
state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder, a.featureToggles),
func(ctx context.Context, statesToSend state.StateTransitions) {
start := a.clock.Now()
alerts := a.send(ctx, logger, statesToSend)

View File

@ -1317,7 +1317,7 @@ func stateForRule(rule *models.AlertRule, ts time.Time, evalState eval.State) *s
for k, v := range rule.Labels {
s.Labels[k] = v
}
for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true) {
for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true, nil) {
if _, ok := s.Labels[k]; !ok {
s.Labels[k] = v
}

View File

@ -304,7 +304,7 @@ func (r ruleWithFolder) Fingerprint() fingerprint {
}
for _, setting := range rule.NotificationSettings {
binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint()))
binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint(nil)))
writeBytes(tmp)
}

View File

@ -18,6 +18,7 @@ import (
"github.com/grafana/grafana/pkg/apimachinery/errutil"
"github.com/grafana/grafana/pkg/expr"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/ngalert/eval"
"github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/screenshot"
@ -753,7 +754,7 @@ func ParseFormattedState(stateStr string) (eval.State, string, error) {
}
// GetRuleExtraLabels returns a map of built-in labels that should be added to an alert before it is sent to the Alertmanager or its state is cached.
func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool) map[string]string {
func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool, features featuremgmt.FeatureToggles) map[string]string {
extraLabels := make(map[string]string, 4)
extraLabels[alertingModels.NamespaceUIDLabel] = rule.NamespaceUID
@ -771,7 +772,7 @@ func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string
ignored, _ := json.Marshal(rule.NotificationSettings[1:])
l.Error("Detected multiple notification settings, which is not supported. Only the first will be applied", "ignored_settings", string(ignored))
}
return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels())
return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels(features))
}
return extraLabels
}

View File

@ -779,7 +779,7 @@ func TestGetRuleExtraLabels(t *testing.T) {
models.RuleUIDLabel: rule.UID,
ngmodels.AutogeneratedRouteLabel: "true",
ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver,
ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(),
ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(),
},
},
"ignore_multiple_notifications": {
@ -794,14 +794,14 @@ func TestGetRuleExtraLabels(t *testing.T) {
models.RuleUIDLabel: rule.UID,
ngmodels.AutogeneratedRouteLabel: "true",
ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver,
ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(),
ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(),
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder)
result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder, nil)
require.Equal(t, tc.expected, result)
})
}

View File

@ -334,6 +334,9 @@ func filterOutSpecialDatasources(dash *DashboardSummaryInfo) {
case "-- Dashboard --":
// The `Dashboard` datasource refers to the results of the query used in another panel
continue
case "grafana":
// this is the uid for the -- Grafana -- datasource
continue
default:
dsRefs = append(dsRefs, ds)
}
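
A simplified sketch of the rule under change, with a hypothetical helper name: references to the built-in pseudo-datasources are dropped from the summary, everything else is kept as a real dependency.

```go
// filterSpecial drops refs to built-in pseudo-datasources.
func filterSpecial(uids []string) []string {
	kept := make([]string, 0, len(uids))
	for _, uid := range uids {
		switch uid {
		// "grafana" is the uid of the built-in -- Grafana -- datasource.
		case "grafana", "-- Dashboard --":
			continue // built-ins, not real external dependencies
		default:
			kept = append(kept, uid)
		}
	}
	return kept
}
```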

View File

@ -2,24 +2,12 @@
"id": 250,
"title": "fast streaming",
"tags": null,
"datasource": [
{
"uid": "grafana",
"type": "datasource"
}
],
"panels": [
{
"id": 3,
"title": "Panel Title",
"type": "timeseries",
"pluginVersion": "7.5.0-pre",
"datasource": [
{
"uid": "grafana",
"type": "datasource"
}
]
"pluginVersion": "7.5.0-pre"
}
],
"schemaVersion": 27,

View File

@ -3,10 +3,6 @@
"title": "special ds",
"tags": null,
"datasource": [
{
"uid": "grafana",
"type": "datasource"
},
{
"uid": "dgd92lq7k",
"type": "frser-sqlite-datasource"
@ -22,10 +18,6 @@
"title": "mixed ds with grafana ds",
"type": "timeseries",
"datasource": [
{
"uid": "grafana",
"type": "datasource"
},
{
"uid": "dgd92lq7k",
"type": "frser-sqlite-datasource"
@ -45,13 +37,7 @@
{
"id": 6,
"title": "grafana ds",
"type": "timeseries",
"datasource": [
{
"uid": "grafana",
"type": "datasource"
}
]
"type": "timeseries"
},
{
"id": 2,

View File

@ -431,7 +431,8 @@ func (tapi *TeamAPI) validateTeam(c *contextmodel.ReqContext, teamID int64, prov
return response.Error(http.StatusInternalServerError, "Failed to get Team", err)
}
if teamDTO.IsProvisioned {
isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false)
if isGroupSyncEnabled && teamDTO.IsProvisioned {
return response.Error(http.StatusBadRequest, provisionedMessage, err)
}

View File

@ -299,7 +299,8 @@ func (tapi *TeamAPI) removeTeamMember(c *contextmodel.ReqContext) response.Respo
return response.Error(http.StatusInternalServerError, "Failed to get Team", err)
}
if existingTeam.IsProvisioned {
isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false)
if isGroupSyncEnabled && existingTeam.IsProvisioned {
return response.Error(http.StatusBadRequest, "Team memberships cannot be updated for provisioned teams", err)
}

View File

@ -169,13 +169,15 @@ func TestUpdateTeamMembersAPIEndpoint(t *testing.T) {
})
}
func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) {
func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsEnabled(t *testing.T) {
server := SetupAPITestServer(t, &teamtest.FakeService{
ExpectedIsMember: true,
ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true},
}, func(tapi *TeamAPI) {
tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true")
})
t.Run("should not be able to update team member from a provisioned team", func(t *testing.T) {
t.Run("should not be able to update team member from a provisioned team if team sync is enabled", func(t *testing.T) {
req := webtest.RequestWithSignedInUser(
server.NewRequest(http.MethodPut, "/api/teams/1/members/1", strings.NewReader("{\"permission\": 1}")),
authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}),
@ -186,7 +188,7 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) {
require.NoError(t, res.Body.Close())
})
t.Run("should not be able to update team member from a provisioned team by team UID", func(t *testing.T) {
t.Run("should not be able to update team member from a provisioned team by team UID if team sync is enabled", func(t *testing.T) {
req := webtest.RequestWithSignedInUser(
server.NewRequest(http.MethodPut, "/api/teams/a00001/members/1", strings.NewReader("{\"permission\": 1}")),
authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}),
@ -198,6 +200,27 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) {
})
}
func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsDisabled(t *testing.T) {
t.Run("should be able to delete team member from a provisioned team when SCIM group sync is disabled", func(t *testing.T) {
server := SetupAPITestServer(t, nil, func(hs *TeamAPI) {
hs.teamService = &teamtest.FakeService{
ExpectedIsMember: true,
ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true},
}
hs.teamPermissionsService = &actest.FakePermissionsService{}
})
req := webtest.RequestWithSignedInUser(
server.NewRequest(http.MethodDelete, "/api/teams/1/members/1", nil),
authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}),
)
res, err := server.SendJSON(req)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, res.StatusCode)
require.NoError(t, res.Body.Close())
})
}
func TestDeleteTeamMembersAPIEndpoint(t *testing.T) {
server := SetupAPITestServer(t, nil, func(hs *TeamAPI) {
hs.teamService = &teamtest.FakeService{
@ -236,6 +259,8 @@ func TestDeleteTeamMembersFromProvisionedTeam(t *testing.T) {
ExpectedTeamDTO: &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true},
}
hs.teamPermissionsService = &actest.FakePermissionsService{}
}, func(hs *TeamAPI) {
hs.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true")
})
t.Run("should not be able to delete team member from a provisioned team", func(t *testing.T) {

View File

@ -138,6 +138,7 @@ type Cfg struct {
ProvisioningDisableControllers bool
ProvisioningAllowedTargets []string
ProvisioningAllowImageRendering bool
ProvisioningMinSyncInterval time.Duration
ProvisioningRepositoryTypes []string
ProvisioningLokiURL string
ProvisioningLokiUser string
@ -2125,6 +2126,7 @@ func (cfg *Cfg) readProvisioningSettings(iniFile *ini.File) error {
cfg.ProvisioningAllowedTargets = []string{"instance", "folder"}
}
cfg.ProvisioningAllowImageRendering = iniFile.Section("provisioning").Key("allow_image_rendering").MustBool(true)
cfg.ProvisioningMinSyncInterval = iniFile.Section("provisioning").Key("min_sync_interval").MustDuration(10 * time.Second)
// Read job history configuration
cfg.ProvisioningLokiURL = valueAsString(iniFile.Section("provisioning"), "loki_url", "")

View File

@ -293,13 +293,6 @@ func (s *Storage) Delete(
if err := preconditions.Check(key, out); err != nil {
return err
}
if preconditions.ResourceVersion != nil {
cmd.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64)
if err != nil {
return err
}
}
if preconditions.UID != nil {
cmd.Uid = string(*preconditions.UID)
}
@ -319,6 +312,10 @@ func (s *Storage) Delete(
return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_DELETED, key, out, out)
}
cmd.ResourceVersion, err = meta.GetResourceVersionInt64()
if err != nil {
return resource.GetError(resource.AsErrorResult(err))
}
rsp, err := s.store.Delete(ctx, cmd)
if err != nil {
return resource.GetError(resource.AsErrorResult(err))
@ -536,6 +533,18 @@ func (s *Storage) GuaranteedUpdate(
if err != nil {
return err
}
// NOTE: by default, the RV will **not** be set in the preconditions (it is removed here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L187)
// instead, the RV check is done with the object from the request itself.
//
// the object from the request is retrieved in the tryUpdate function (we use the generic k8s store one). this function calls the UpdateObject function here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L653
// and that will run a series of transformations: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L219
//
// the specific transformations it runs depends on what type of update it is.
// for patch, the transformers are set here and use the patchBytes from the request: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go#L697
// for put, it uses the object from the request here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go#L163
//
// after those transformations, the RV will then be on the object so that the RV check can properly be done here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L662
// it will be compared to the current object that we pass in below from storage.
if preconditions != nil && preconditions.ResourceVersion != nil {
req.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64)
if err != nil {
@ -611,8 +620,6 @@ func (s *Storage) GuaranteedUpdate(
}
continue
}
break
}
v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj)
if err != nil {
@ -623,10 +630,14 @@ func (s *Storage) GuaranteedUpdate(
var rv uint64
req.Value = v.raw.Bytes()
if !bytes.Equal(req.Value, existingBytes) {
req.ResourceVersion = readResponse.ResourceVersion
updateResponse, err := s.store.Update(ctx, req)
if err != nil {
err = resource.GetError(resource.AsErrorResult(err))
} else if updateResponse.Error != nil {
if attempt < MaxUpdateAttempts && updateResponse.Error.Code == http.StatusConflict {
continue // try the read again
}
err = resource.GetError(updateResponse.Error)
}
@ -647,6 +658,8 @@ func (s *Storage) GuaranteedUpdate(
return err
}
}
return nil
}
return nil
}

View File

@ -18,7 +18,6 @@ import (
// Package-level errors.
var (
ErrOptimisticLockingFailed = errors.New("optimistic locking failed")
ErrNotImplementedYet = errors.New("not implemented yet")
)
@ -31,6 +30,12 @@ var (
Code: http.StatusConflict,
},
}
ErrOptimisticLockingFailed = resourcepb.ErrorResult{
Code: http.StatusConflict,
Reason: "optimistic locking failed",
Message: "requested RV does not match saved RV",
}
)
func NewBadRequestError(msg string) *resourcepb.ErrorResult {

View File

@ -731,8 +731,12 @@ func (s *server) update(ctx context.Context, user claims.AuthInfo, req *resource
return rsp, nil
}
// TODO: once we know the client is always sending the RV, require ResourceVersion > 0
// See: https://github.com/grafana/grafana/pull/111866
if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion {
return nil, ErrOptimisticLockingFailed
return &resourcepb.UpdateResponse{
Error: &ErrOptimisticLockingFailed,
}, nil
}
event, e := s.newEvent(ctx, user, req.Key, req.Value, latest.Value)
@ -796,7 +800,7 @@ func (s *server) delete(ctx context.Context, user claims.AuthInfo, req *resource
return rsp, nil
}
if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion {
rsp.Error = AsErrorResult(ErrOptimisticLockingFailed)
rsp.Error = &ErrOptimisticLockingFailed
return rsp, nil
}

View File

@ -477,11 +477,12 @@ func TestSimpleServer(t *testing.T) {
ResourceVersion: created.ResourceVersion})
require.NoError(t, err)
_, err = server.Update(ctx, &resourcepb.UpdateRequest{
rsp, _ := server.Update(ctx, &resourcepb.UpdateRequest{
Key: key,
Value: raw,
ResourceVersion: created.ResourceVersion})
require.ErrorIs(t, err, ErrOptimisticLockingFailed)
require.Equal(t, rsp.Error.Code, ErrOptimisticLockingFailed.Code)
require.Equal(t, rsp.Error.Message, ErrOptimisticLockingFailed.Message)
})
}

View File

@ -26,7 +26,6 @@
"createdBy": "user:be2g71ke8yoe8b",
"fields": {
"ds_types": [
"datasource",
"my-custom-plugin"
],
"errors_last_1_days": 1,
@ -47,12 +46,6 @@
"kind": "DataSource",
"name": "DSUID"
},
{
"relation": "depends-on",
"group": "datasource",
"kind": "DataSource",
"name": "grafana"
},
{
"relation": "depends-on",
"group": "dashboards.grafana.app",

View File

@ -18,6 +18,7 @@ import (
"go.opentelemetry.io/otel/trace/noop"
"google.golang.org/protobuf/proto"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/grafana/grafana/pkg/util/sqlite"
@ -405,15 +406,18 @@ func (b *backend) update(ctx context.Context, event resource.WriteEvent) (int64,
// Use rvManager.ExecWithRV instead of direct transaction
rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) {
// 1. Update resource
_, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
res, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
SQLTemplate: sqltemplate.New(b.dialect),
WriteEvent: event,
WriteEvent: event, // includes the RV
Folder: folder,
GUID: event.GUID,
})
if err != nil {
return event.GUID, fmt.Errorf("resource update: %w", err)
}
if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil {
return event.GUID, err
}
// 2. Insert into resource history
if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{
@ -460,7 +464,7 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64,
}
rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) {
// 1. delete from resource
_, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{
res, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{
SQLTemplate: sqltemplate.New(b.dialect),
WriteEvent: event,
GUID: event.GUID,
@ -468,6 +472,9 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64,
if err != nil {
return event.GUID, fmt.Errorf("delete resource: %w", err)
}
if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil {
return event.GUID, err
}
// 2. Add event to resource history
if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{
@ -504,6 +511,28 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64,
return rv, nil
}
func (b *backend) checkConflict(res db.Result, key *resourcepb.ResourceKey, rv int64) error {
if rv == 0 {
return nil
}
// The RV is part of the update request, and it may no longer be the most recent
rows, err := res.RowsAffected()
if err != nil {
return fmt.Errorf("unable to verify RV: %w", err)
}
if rows == 1 {
return nil // expected one result
}
if rows > 0 {
return fmt.Errorf("multiple rows effected (%d)", rows)
}
return apierrors.NewConflict(schema.GroupResource{
Group: key.Group,
Resource: key.Resource,
}, key.Name, fmt.Errorf("resource version does not match current value"))
}
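
`checkConflict` turns a zero-row write into a structured conflict, which the caller can use to retry. A sketch of the surrounding read-modify-write loop, with illustrative helper names and types (the real loop is `Storage.GuaranteedUpdate` earlier in this diff):

```go
// Illustrative helpers: read returns the latest object plus its RV; update
// issues the RV-conditioned write and returns a conflict error on zero rows.
func guaranteedUpdate(ctx context.Context, key Key, mutate func(Object) Object) error {
	var lastErr error
	for attempt := 1; attempt <= MaxUpdateAttempts; attempt++ {
		current, rv, err := read(ctx, key)
		if err != nil {
			return err
		}
		// The write is conditioned on the RV we read:
		//   UPDATE ... WHERE ... AND resource_version = rv
		lastErr = update(ctx, key, mutate(current), rv)
		if apierrors.IsConflict(lastErr) {
			continue // another writer bumped the RV; re-read and retry
		}
		return lastErr
	}
	return lastErr // retries exhausted; surface the final conflict
}
```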
func (b *backend) ReadResource(ctx context.Context, req *resourcepb.ReadRequest) *resource.BackendReadResponse {
_, span := b.tracer.Start(ctx, tracePrefix+".Read")
defer span.End()

View File

@ -8,7 +8,6 @@ import (
"testing"
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

View File

@ -5,5 +5,6 @@ DELETE FROM {{ .Ident "resource" }}
AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }}
{{ if .WriteEvent.Key.Name }}
AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }}
AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }}
{{ end }}
;

View File

@ -10,4 +10,5 @@ UPDATE {{ .Ident "resource" }}
AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }}
AND {{ .Ident "namespace" }} = {{ .Arg .WriteEvent.Key.Namespace }}
AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }}
AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }}
;

View File

@ -88,13 +88,22 @@ func TestIntegrationListIter(t *testing.T) {
Name: item.name,
},
Value: item.value,
PreviousRV: 0,
},
})
if err != nil {
return fmt.Errorf("failed to insert test data: %w", err)
}
_, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdateRV, sqlResourceUpdateRVRequest{
SQLTemplate: sqltemplate.New(dialect),
GUIDToRV: map[string]int64{
item.guid: item.resourceVersion,
},
}); err != nil {
return fmt.Errorf("failed to insert test data: %w", err)
}
if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
SQLTemplate: sqltemplate.New(dialect),
GUID: item.guid,
ResourceVersion: item.resourceVersion,
@ -110,8 +119,7 @@ func TestIntegrationListIter(t *testing.T) {
PreviousRV: item.resourceVersion,
Type: 1,
},
})
if err != nil {
}); err != nil {
return fmt.Errorf("failed to insert resource version: %w", err)
}
}

View File

@ -31,6 +31,21 @@ func TestUnifiedStorageQueries(t *testing.T) {
},
},
},
{
Name: "with rv",
Data: &sqlResourceRequest{
SQLTemplate: mocks.NewTestingSQLTemplate(),
WriteEvent: resource.WriteEvent{
Key: &resourcepb.ResourceKey{
Namespace: "nn",
Group: "gg",
Resource: "rr",
Name: "name",
},
PreviousRV: 1234,
},
},
},
},
sqlResourceInsert: {
{
@ -63,6 +78,7 @@ func TestUnifiedStorageQueries(t *testing.T) {
Resource: "rr",
Name: "name",
},
PreviousRV: 1759304090100678,
},
Folder: "fldr",
},

View File

@ -263,7 +263,7 @@ func (m *resourceVersionManager) execBatch(ctx context.Context, group, resource
attribute.Int("operation_index", i),
attribute.String("error", err.Error()),
))
return fmt.Errorf("failed to execute function: %w", err)
return err
}
guids[i] = guid
}

View File

@ -6,10 +6,12 @@ import (
"fmt"
"hash/fnv"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/otel"
@ -94,6 +96,7 @@ func ProvideUnifiedStorageGrpcService(
indexMetrics *resource.BleveIndexMetrics,
searchRing *ring.Ring,
memberlistKVConfig kv.Config,
httpServerRouter *mux.Router,
) (UnifiedStorageGrpcService, error) {
var err error
tracer := otel.Tracer("unified-storage")
@ -159,6 +162,10 @@ func ProvideUnifiedStorageGrpcService(
s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true)
subservices = append(subservices, s.ringLifecycler)
if httpServerRouter != nil {
httpServerRouter.Path("/prepare-downscale").Methods("GET", "POST", "DELETE").Handler(http.HandlerFunc(s.PrepareDownscale))
}
}
if cfg.QOSEnabled {
@ -194,6 +201,21 @@ func ProvideUnifiedStorageGrpcService(
return s, nil
}
func (s *service) PrepareDownscale(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodPost:
s.log.Info("Preparing for downscale. Will not keep instance in ring on shutdown.")
s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(false)
case http.MethodDelete:
s.log.Info("Downscale canceled. Will keep instance in ring on shutdown.")
s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true)
case http.MethodGet:
// used for the delayed-downscale use case, which we don't support. Left here for completeness.
s.log.Info("Received GET request for prepare-downscale. Behavior not implemented.")
default:
}
}
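
A sketch of how an orchestrator might drive this endpoint before removing the instance, with a hypothetical host and port:

```go
// POST   -> instance will leave the ring on shutdown (safe to scale down).
// DELETE -> cancels a pending downscale; instance stays in the ring.
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
	"http://storage-0:3000/prepare-downscale", nil)
if err != nil {
	return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
	return err
}
defer resp.Body.Close()
```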
var (
// operation used by the search-servers to check if they own the namespace
searchOwnerRead = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE, ring.LEAVING}, nil)

View File

@ -128,7 +128,7 @@ func TestClientServer(t *testing.T) {
features := featuremgmt.WithFeatures()
svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{})
svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil)
require.NoError(t, err)
var client resourcepb.ResourceStoreClient

View File

@ -4,4 +4,5 @@ DELETE FROM `resource`
AND `group` = 'gg'
AND `resource` = 'rr'
AND `name` = 'name'
AND `resource_version` = 0
;

View File

@ -0,0 +1,8 @@
DELETE FROM `resource`
WHERE 1 = 1
AND `namespace` = 'nn'
AND `group` = 'gg'
AND `resource` = 'rr'
AND `name` = 'name'
AND `resource_version` = 1234
;

View File

@ -10,4 +10,5 @@ UPDATE `resource`
AND `resource` = 'rr'
AND `namespace` = 'nn'
AND `name` = 'name'
AND `resource_version` = 1759304090100678
;

View File

@ -4,4 +4,5 @@ DELETE FROM "resource"
AND "group" = 'gg'
AND "resource" = 'rr'
AND "name" = 'name'
AND "resource_version" = 0
;

View File

@ -0,0 +1,8 @@
DELETE FROM "resource"
WHERE 1 = 1
AND "namespace" = 'nn'
AND "group" = 'gg'
AND "resource" = 'rr'
AND "name" = 'name'
AND "resource_version" = 1234
;

View File

@ -10,4 +10,5 @@ UPDATE "resource"
AND "resource" = 'rr'
AND "namespace" = 'nn'
AND "name" = 'name'
AND "resource_version" = 1759304090100678
;

View File

@ -4,4 +4,5 @@ DELETE FROM "resource"
AND "group" = 'gg'
AND "resource" = 'rr'
AND "name" = 'name'
AND "resource_version" = 0
;

View File

@ -0,0 +1,8 @@
DELETE FROM "resource"
WHERE 1 = 1
AND "namespace" = 'nn'
AND "group" = 'gg'
AND "resource" = 'rr'
AND "name" = 'name'
AND "resource_version" = 1234
;

View File

@ -10,4 +10,5 @@ UPDATE "resource"
AND "resource" = 'rr'
AND "namespace" = 'nn'
AND "name" = 'name'
AND "resource_version" = 1759304090100678
;

View File

@ -124,13 +124,13 @@ func runTestIntegrationBackendHappyPath(t *testing.T, backend resource.StorageBa
})
t.Run("Update item2", func(t *testing.T) {
rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2))
require.NoError(t, err)
require.Greater(t, rv4, rv3)
})
t.Run("Delete item1", func(t *testing.T) {
rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1))
require.NoError(t, err)
require.Greater(t, rv5, rv4)
})
@ -352,10 +352,10 @@ func runTestIntegrationBackendList(t *testing.T, backend resource.StorageBackend
rv5, err := writeEvent(ctx, backend, "item5", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.NoError(t, err)
require.Greater(t, rv5, rv4)
rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2))
require.NoError(t, err)
require.Greater(t, rv6, rv5)
rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv3))
require.NoError(t, err)
require.Greater(t, rv7, rv6)
rv8, err := writeEvent(ctx, backend, "item6", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
@ -490,10 +490,10 @@ func runTestIntegrationBackendListModifiedSince(t *testing.T, backend resource.S
ns := nsPrefix + "-history-ns"
rvCreated, _ := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.Greater(t, rvCreated, int64(0))
rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvCreated))
require.NoError(t, err)
require.Greater(t, rvUpdated, rvCreated)
rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rvUpdated))
require.NoError(t, err)
require.Greater(t, rvDeleted, rvUpdated)
@ -610,19 +610,19 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
require.Greater(t, rv1, int64(0))
// add 5 events for item1 - should be saved to history
rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1))
require.NoError(t, err)
require.Greater(t, rvHistory1, rv1)
rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory1))
require.NoError(t, err)
require.Greater(t, rvHistory2, rvHistory1)
rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory2))
require.NoError(t, err)
require.Greater(t, rvHistory3, rvHistory2)
rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory3))
require.NoError(t, err)
require.Greater(t, rvHistory4, rvHistory3)
rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory4))
require.NoError(t, err)
require.Greater(t, rvHistory5, rvHistory4)
@ -804,8 +804,9 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
resourceVersions = append(resourceVersions, initialRV)
// Create 9 more versions with modifications
rv := initialRV
for i := 0; i < 9; i++ {
rv, err := writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns2))
rv, err = writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns2, rv))
require.NoError(t, err)
resourceVersions = append(resourceVersions, rv)
}
@ -907,7 +908,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
// Create a resource and delete it
rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.NoError(t, err)
rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv))
require.NoError(t, err)
require.Greater(t, rvDeleted, rv)
@ -932,7 +933,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
// Create a resource and delete it
rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.NoError(t, err)
rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv))
require.NoError(t, err)
require.Greater(t, rvDeleted, rv)
@ -940,7 +941,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
rv1, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.NoError(t, err)
require.Greater(t, rv1, rvDeleted)
rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1))
require.NoError(t, err)
require.Greater(t, rv2, rv1)
@ -983,8 +984,8 @@ func runTestIntegrationBackendListHistoryErrorReporting(t *testing.T, backend re
const events = 500
prevRv := origRv
for i := 0; i < events; i++ {
rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespace(ns), WithGroup(group), WithResource(resourceName))
for range events {
rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, prevRv), WithGroup(group), WithResource(resourceName))
require.NoError(t, err)
require.Greater(t, rv, prevRv)
prevRv = rv
@ -1131,6 +1132,14 @@ func runTestIntegrationBackendCreateNewResource(t *testing.T, backend resource.S
// WriteEventOption is a function that modifies WriteEventOptions
type WriteEventOption func(*WriteEventOptions)
// WithNamespaceAndRV sets the namespace and the previous resource version for the write event
func WithNamespaceAndRV(namespace string, rv int64) WriteEventOption {
return func(o *WriteEventOptions) {
o.Namespace = namespace
o.PreviousRV = rv
}
}
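// A minimal usage sketch, assuming writeEvent, a storage backend, and a
// namespace ns as in the tests above: each later write passes the resource
// version returned by the previous one.
//
//	rv1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
//	require.NoError(t, err)
//	rv2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1))
//	require.NoError(t, err)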
// WithNamespace sets the namespace for the write event
func WithNamespace(namespace string) WriteEventOption {
return func(o *WriteEventOptions) {
@ -1185,6 +1194,7 @@ type WriteEventOptions struct {
Resource string
Folder string
Value []byte
PreviousRV int64
}
func writeEvent(ctx context.Context, store resource.StorageBackend, name string, action resourcepb.WatchEvent_Type, opts ...WriteEventOption) (int64, error) {
@ -1236,6 +1246,7 @@ func writeEvent(ctx context.Context, store resource.StorageBackend, name string,
Resource: options.Resource,
Name: name,
},
PreviousRV: options.PreviousRV,
}
switch action {
case resourcepb.WatchEvent_DELETED:
@ -1285,18 +1296,15 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken
rv1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.NoError(t, err)
require.Greater(t, rv1, int64(0))
rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1))
require.NoError(t, err)
require.Greater(t, rvDelete1, rv1)
rvDelete2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
require.NoError(t, err)
require.Greater(t, rvDelete2, rvDelete1)
// item2 deleted and recreated, should not be returned in trash
rv2, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
require.NoError(t, err)
require.Greater(t, rv2, int64(0))
rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv2))
require.NoError(t, err)
require.Greater(t, rvDelete3, rv2)
rv3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
@ -1325,10 +1333,10 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken
},
},
},
expectedVersions: []int64{rvDelete2},
expectedVersions: []int64{rvDelete1},
expectedValues: []string{"item1 DELETED"},
minExpectedHeadRV: rvDelete2,
expectedContinueRV: rvDelete2,
minExpectedHeadRV: rvDelete1,
expectedContinueRV: rvDelete1,
expectedSortAsc: false,
},
{

View File

@ -209,7 +209,7 @@
"path": "public/plugins/grafana-azure-monitor-datasource/img/azure_monitor_cpu.png"
}
],
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": [
"azure",
@ -880,7 +880,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -934,7 +934,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": [
"grafana",
@ -1000,7 +1000,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -1217,7 +1217,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -1325,7 +1325,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -1375,7 +1375,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -1425,7 +1425,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -1629,7 +1629,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": [
"grafana",
@ -1734,12 +1734,12 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
"dependencies": {
"grafanaDependency": "",
"grafanaDependency": "\u003e=11.6.0",
"grafanaVersion": "*",
"plugins": [],
"extensions": {
@ -2042,7 +2042,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -2092,7 +2092,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},
@ -2445,7 +2445,7 @@
},
"build": {},
"screenshots": null,
"version": "12.2.0-pre",
"version": "12.3.0-pre",
"updated": "",
"keywords": null
},

View File

@ -129,7 +129,7 @@ func StartGrafanaEnv(t *testing.T, grafDir, cfgPath string) (string, *server.Tes
var storage sql.UnifiedStorageGrpcService
if runstore {
storage, err = sql.ProvideUnifiedStorageGrpcService(env.Cfg, env.FeatureToggles, env.SQLStore,
env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{})
env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil)
require.NoError(t, err)
ctx := context.Background()
err = storage.StartAsync(ctx)

View File

@ -0,0 +1,117 @@
package pgx
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"strings"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
)
func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
err := e.Ping(ctx)
if err != nil {
logCheckHealthError(ctx, e.dsInfo, err)
if req.PluginContext.User != nil && strings.EqualFold(req.PluginContext.User.Role, "Admin") {
return ErrToHealthCheckResult(err)
}
errResponse := &backend.CheckHealthResult{
Status: backend.HealthStatusError,
Message: e.TransformQueryError(e.log, err).Error(),
}
return errResponse, nil
}
return &backend.CheckHealthResult{Status: backend.HealthStatusOk, Message: "Database Connection OK"}, nil
}
// ErrToHealthCheckResult converts an error into a user-friendly health check message.
// This should be called with a non-nil error; if err is nil, we return Internal Server Error.
func ErrToHealthCheckResult(err error) (*backend.CheckHealthResult, error) {
if err == nil {
return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"}, nil
}
res := &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: err.Error()}
details := map[string]string{
"verboseMessage": err.Error(),
"errorDetailsLink": "https://grafana.com/docs/grafana/latest/datasources/postgres",
}
var opErr *net.OpError
if errors.As(err, &opErr) {
res.Message = "Network error: Failed to connect to the server"
if opErr.Err != nil {
errMessage := opErr.Err.Error()
// Collapse host-specific details into a generic message when a known
// failure mode is recognized in the error suffix.
for suffix, generic := range map[string]string{
"no such host":            "no such host",
"unknown port":            "unknown port",
"invalid port":            "invalid port",
"missing port in address": "missing port in address",
"invalid syntax":          "invalid syntax found in the address",
} {
if strings.HasSuffix(errMessage, suffix) {
errMessage = generic
break
}
}
res.Message += fmt.Sprintf(". Error message: %s", errMessage)
}
}
if errors.Is(err, sqleng.ErrParsingPostgresURL) {
res.Message = fmt.Sprintf("Connection string error: %s", sqleng.ErrParsingPostgresURL.Error())
if unwrappedErr := errors.Unwrap(err); unwrappedErr != nil {
details["verboseMessage"] = unwrappedErr.Error()
}
}
detailBytes, marshalErr := json.Marshal(details)
if marshalErr != nil {
return res, nil
}
res.JSONDetails = detailBytes
return res, nil
}
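// A hedged example of the mapping above, assuming a DNS failure wrapped in a
// *net.OpError; the suffix matching strips the host-specific prefix:
//
//	res, _ := ErrToHealthCheckResult(&net.OpError{Op: "dial", Err: errors.New("lookup db.local: no such host")})
//	// res.Status  == backend.HealthStatusError
//	// res.Message == "Network error: Failed to connect to the server. Error message: no such host"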
func logCheckHealthError(ctx context.Context, dsInfo sqleng.DataSourceInfo, err error) {
logger := log.DefaultLogger.FromContext(ctx)
configSummary := map[string]any{
"config_url_length": len(dsInfo.URL),
"config_user_length": len(dsInfo.User),
"config_database_length": len(dsInfo.Database),
"config_json_data_database_length": len(dsInfo.JsonData.Database),
"config_max_open_conns": dsInfo.JsonData.MaxOpenConns,
"config_max_idle_conns": dsInfo.JsonData.MaxIdleConns,
"config_conn_max_life_time": dsInfo.JsonData.ConnMaxLifetime,
"config_conn_timeout": dsInfo.JsonData.ConnectionTimeout,
"config_timescaledb": dsInfo.JsonData.Timescaledb,
"config_ssl_mode": dsInfo.JsonData.Mode,
"config_tls_configuration_method": dsInfo.JsonData.ConfigurationMethod,
"config_tls_skip_verify": dsInfo.JsonData.TlsSkipVerify,
"config_timezone": dsInfo.JsonData.Timezone,
"config_time_interval": dsInfo.JsonData.TimeInterval,
"config_enable_secure_proxy": dsInfo.JsonData.SecureDSProxy,
"config_allow_clear_text_passwords": dsInfo.JsonData.AllowCleartextPasswords,
"config_authentication_type": dsInfo.JsonData.AuthenticationType,
"config_ssl_root_cert_file_length": len(dsInfo.JsonData.RootCertFile),
"config_ssl_cert_file_length": len(dsInfo.JsonData.CertFile),
"config_ssl_key_file_length": len(dsInfo.JsonData.CertKeyFile),
"config_encrypt_length": len(dsInfo.JsonData.Encrypt),
"config_server_name_length": len(dsInfo.JsonData.Servername),
"config_password_length": len(dsInfo.DecryptedSecureJSONData["password"]),
"config_tls_ca_cert_length": len(dsInfo.DecryptedSecureJSONData["tlsCACert"]),
"config_tls_client_cert_length": len(dsInfo.DecryptedSecureJSONData["tlsClientCert"]),
"config_tls_client_key_length": len(dsInfo.DecryptedSecureJSONData["tlsClientKey"]),
}
configSummaryJSON, marshalError := json.Marshal(configSummary)
if marshalError != nil {
logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error")
return
}
logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error_detailed", "details", string(configSummaryJSON))
}

View File

@ -0,0 +1,61 @@
package pgx
import (
"errors"
"fmt"
"net"
"testing"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestErrToHealthCheckResult(t *testing.T) {
tests := []struct {
name string
err error
want *backend.CheckHealthResult
}{
{
name: "without error",
want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"},
},
{
name: "network error",
err: errors.Join(errors.New("foo"), &net.OpError{Op: "read", Net: "tcp", Err: errors.New("some op")}),
want: &backend.CheckHealthResult{
Status: backend.HealthStatusError,
Message: "Network error: Failed to connect to the server. Error message: some op",
JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"foo\nread tcp: some op"}`),
},
},
{
name: "regular error",
err: errors.New("internal server error"),
want: &backend.CheckHealthResult{
Status: backend.HealthStatusError,
Message: "internal server error",
JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"internal server error"}`),
},
},
{
name: "invalid port specifier error",
err: fmt.Errorf("%w %q: %w", sqleng.ErrParsingPostgresURL, `"foo.bar.co"`, errors.New(`strconv.Atoi: parsing "foo.bar.co": invalid syntax`)),
want: &backend.CheckHealthResult{
Status: backend.HealthStatusError,
Message: "Connection string error: error parsing postgres url",
JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"error parsing postgres url \"\\\"foo.bar.co\\\"\": strconv.Atoi: parsing \"foo.bar.co\": invalid syntax"}`),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := ErrToHealthCheckResult(tt.err)
require.Nil(t, err)
assert.Equal(t, string(tt.want.JSONDetails), string(got.JSONDetails))
require.Equal(t, tt.want, got)
})
}
}

View File

@ -1,25 +1,109 @@
package sqleng
package pgx
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
"github.com/jackc/pgx/v5/pgxpool"
)
func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer,
// MetaKeyExecutedQueryString is the key under which the executed query is stored
const MetaKeyExecutedQueryString = "executedQueryString"
// SQLMacroEngine interpolates macros into SQL. It takes the query for access to the
// query context, and the time range to generate queries that use from and to.
type SQLMacroEngine interface {
Interpolate(query *backend.DataQuery, timeRange backend.TimeRange, sql string) (string, error)
}
// SqlQueryResultTransformer transforms query errors and supplies the string converters used when building frames.
type SqlQueryResultTransformer interface {
// TransformQueryError transforms a query error.
TransformQueryError(logger log.Logger, err error) error
GetConverterList() []sqlutil.StringConverter
}
type JsonData struct {
MaxOpenConns int `json:"maxOpenConns"`
MaxIdleConns int `json:"maxIdleConns"`
ConnMaxLifetime int `json:"connMaxLifetime"`
ConnectionTimeout int `json:"connectionTimeout"`
Timescaledb bool `json:"timescaledb"`
Mode string `json:"sslmode"`
ConfigurationMethod string `json:"tlsConfigurationMethod"`
TlsSkipVerify bool `json:"tlsSkipVerify"`
RootCertFile string `json:"sslRootCertFile"`
CertFile string `json:"sslCertFile"`
CertKeyFile string `json:"sslKeyFile"`
Timezone string `json:"timezone"`
Encrypt string `json:"encrypt"`
Servername string `json:"servername"`
TimeInterval string `json:"timeInterval"`
Database string `json:"database"`
SecureDSProxy bool `json:"enableSecureSocksProxy"`
SecureDSProxyUsername string `json:"secureSocksProxyUsername"`
AllowCleartextPasswords bool `json:"allowCleartextPasswords"`
AuthenticationType string `json:"authenticationType"`
}
type DataPluginConfiguration struct {
DSInfo sqleng.DataSourceInfo
TimeColumnNames []string
MetricColumnTypes []string
RowLimit int64
}
type DataSourceHandler struct {
macroEngine SQLMacroEngine
queryResultTransformer SqlQueryResultTransformer
timeColumnNames []string
metricColumnTypes []string
log log.Logger
dsInfo sqleng.DataSourceInfo
rowLimit int64
userError string
pool *pgxpool.Pool
}
type QueryJson struct {
RawSql string `json:"rawSql"`
Fill bool `json:"fill"`
FillInterval float64 `json:"fillInterval"`
FillMode string `json:"fillMode"`
FillValue float64 `json:"fillValue"`
Format string `json:"format"`
}
func (e *DataSourceHandler) TransformQueryError(logger log.Logger, err error) error {
// OpError is the error type usually returned by functions in the net
// package. It describes the operation, network type, and address of
// an error. For security purposes we replace it with a generic
// user-facing message rather than returning it to the client.
var opErr *net.OpError
if errors.As(err, &opErr) {
return fmt.Errorf("failed to connect to server - %s", e.userError)
}
return e.queryResultTransformer.TransformQueryError(logger, err)
}
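// A small sketch of the masking, with a hypothetical connection error; the
// raw address never reaches the client, only the configured user-facing
// default error (e.userError) does:
//
//	opErr := &net.OpError{Op: "dial", Net: "tcp", Err: errors.New("10.0.0.1:5432: connection refused")}
//	_ = e.TransformQueryError(e.log, opErr)
//	// -> "failed to connect to server - <e.userError>"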
func NewQueryDataHandler(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer,
macroEngine SQLMacroEngine, log log.Logger) (*DataSourceHandler, error) {
queryDataHandler := DataSourceHandler{
queryResultTransformer: queryResultTransformer,
@ -43,7 +127,12 @@ func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, conf
return &queryDataHandler, nil
}
func (e *DataSourceHandler) DisposePGX() {
type DBDataResponse struct {
dataResponse backend.DataResponse
refID string
}
func (e *DataSourceHandler) Dispose() {
e.log.Debug("Disposing DB...")
if e.pool != nil {
@ -53,11 +142,11 @@ func (e *DataSourceHandler) DisposePGX() {
e.log.Debug("DB disposed")
}
func (e *DataSourceHandler) PingPGX(ctx context.Context) error {
func (e *DataSourceHandler) Ping(ctx context.Context) error {
return e.pool.Ping(ctx)
}
func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
func (e *DataSourceHandler) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
result := backend.NewQueryDataResponse()
ch := make(chan DBDataResponse, len(req.Queries))
var wg sync.WaitGroup
@ -83,7 +172,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query
}
wg.Add(1)
go e.executeQueryPGX(ctx, query, &wg, ch, queryjson)
go e.executeQuery(ctx, query, &wg, ch, queryjson)
}
wg.Wait()
@ -101,7 +190,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query
func (e *DataSourceHandler) handleQueryError(frameErr string, err error, query string, source backend.ErrorSource, ch chan DBDataResponse, queryResult DBDataResponse) {
var emptyFrame data.Frame
emptyFrame.SetMeta(&data.FrameMeta{ExecutedQueryString: query})
if backend.IsDownstreamError(err) {
if isDownstreamError(err) {
source = backend.ErrorSourceDownstream
}
queryResult.dataResponse.Error = fmt.Errorf("%s: %w", frameErr, err)
@ -127,6 +216,18 @@ func (e *DataSourceHandler) handlePanic(logger log.Logger, queryResult *DBDataRe
}
}
// Interpolate provides global macros/substitutions for all sql datasources.
var Interpolate = func(query backend.DataQuery, timeRange backend.TimeRange, timeInterval string, sql string) string {
interval := query.Interval
sql = strings.ReplaceAll(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10))
sql = strings.ReplaceAll(sql, "$__interval", gtime.FormatInterval(interval))
sql = strings.ReplaceAll(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.From.UTC().Unix()))
sql = strings.ReplaceAll(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.To.UTC().Unix()))
return sql
}
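// For example, with a 10-minute query interval and the 5-minute range used in
// the tests for this package:
//
//	sql := Interpolate(query, timeRange, "", "GROUP BY $__interval -- $__interval_ms")
//	// -> "GROUP BY 10m -- 600000"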
func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgconn.Result, error) {
c, err := e.pool.Acquire(ctx)
if err != nil {
@ -140,7 +241,7 @@ func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgc
return mrr.ReadAll()
}
func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup,
func (e *DataSourceHandler) executeQuery(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup,
ch chan DBDataResponse, queryJSON QueryJson) {
defer wg.Done()
queryResult := DBDataResponse{
@ -171,7 +272,7 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query
return
}
qm, err := e.newProcessCfgPGX(queryContext, query, results, interpolatedQuery)
qm, err := e.newProcessCfg(queryContext, query, results, interpolatedQuery)
if err != nil {
e.handleQueryError("failed to get configurations", err, interpolatedQuery, backend.ErrorSourceDownstream, ch, queryResult)
return
@ -186,6 +287,47 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query
e.processFrame(frame, qm, queryResult, ch, logger)
}
// dataQueryFormat is the type of query.
type dataQueryFormat string
const (
// dataQueryFormatTable identifies a table query (default).
dataQueryFormatTable dataQueryFormat = "table"
// dataQueryFormatSeries identifies a time series query.
dataQueryFormatSeries dataQueryFormat = "time_series"
)
type dataQueryModel struct {
InterpolatedQuery string // property not set until after Interpolate()
Format dataQueryFormat
TimeRange backend.TimeRange
FillMissing *data.FillMissing // property not set until after Interpolate()
Interval time.Duration
columnNames []string
columnTypes []string
timeIndex int
timeEndIndex int
metricIndex int
metricPrefix bool
queryContext context.Context
}
func convertSQLTimeColumnsToEpochMS(frame *data.Frame, qm *dataQueryModel) error {
if qm.timeIndex != -1 {
if err := convertSQLTimeColumnToEpochMS(frame, qm.timeIndex); err != nil {
return fmt.Errorf("%v: %w", "failed to convert time column", err)
}
}
if qm.timeEndIndex != -1 {
if err := convertSQLTimeColumnToEpochMS(frame, qm.timeEndIndex); err != nil {
return fmt.Errorf("%v: %w", "failed to convert timeend column", err)
}
}
return nil
}
func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel, queryResult DBDataResponse, ch chan DBDataResponse, logger log.Logger) {
if frame.Meta == nil {
frame.Meta = &data.FrameMeta{}
@ -281,10 +423,10 @@ func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel,
ch <- queryResult
}
func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query backend.DataQuery,
func (e *DataSourceHandler) newProcessCfg(queryContext context.Context, query backend.DataQuery,
results []*pgconn.Result, interpolatedQuery string) (*dataQueryModel, error) {
columnNames := []string{}
columnTypesPGX := []string{}
columnTypes := []string{}
// The results will contain column information in the metadata
for _, result := range results {
@ -296,20 +438,20 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query
// Handle special cases for field types
switch field.DataTypeOID {
case pgtype.TimetzOID:
columnTypesPGX = append(columnTypesPGX, "timetz")
columnTypes = append(columnTypes, "timetz")
case 790:
columnTypesPGX = append(columnTypesPGX, "money")
columnTypes = append(columnTypes, "money")
default:
columnTypesPGX = append(columnTypesPGX, "unknown")
columnTypes = append(columnTypes, "unknown")
}
} else {
columnTypesPGX = append(columnTypesPGX, pqtype.Name)
columnTypes = append(columnTypes, pqtype.Name)
}
}
}
qm := &dataQueryModel{
columnTypesPGX: columnTypesPGX,
columnTypes: columnTypes,
columnNames: columnNames,
timeIndex: -1,
timeEndIndex: -1,
@ -370,7 +512,7 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query
qm.metricIndex = i
default:
if qm.metricIndex == -1 {
columnType := qm.columnTypesPGX[i]
columnType := qm.columnTypes[i]
for _, mct := range e.metricColumnTypes {
if columnType == mct {
qm.metricIndex = i
@ -596,3 +738,99 @@ func getFieldTypesFromDescriptions(fieldDescriptions []pgconn.FieldDescription,
}
return fieldTypes, nil
}
// convertSQLTimeColumnToEpochMS converts the time column at the given index to Unix
// timestamps in milliseconds, so that native datetime types and epoch dates work in
// annotation and table queries.
func convertSQLTimeColumnToEpochMS(frame *data.Frame, timeIndex int) error {
if timeIndex < 0 || timeIndex >= len(frame.Fields) {
return fmt.Errorf("timeIndex %d is out of range", timeIndex)
}
origin := frame.Fields[timeIndex]
valueType := origin.Type()
if valueType == data.FieldTypeTime || valueType == data.FieldTypeNullableTime {
return nil
}
newField := data.NewFieldFromFieldType(data.FieldTypeNullableTime, 0)
newField.Name = origin.Name
newField.Labels = origin.Labels
valueLength := origin.Len()
for i := 0; i < valueLength; i++ {
v, err := origin.NullableFloatAt(i)
if err != nil {
return fmt.Errorf("unable to convert data to a time field")
}
if v == nil {
newField.Append(nil)
} else {
timestamp := time.Unix(0, int64(epochPrecisionToMS(*v))*int64(time.Millisecond))
newField.Append(&timestamp)
}
}
frame.Fields[timeIndex] = newField
return nil
}
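// A short sketch, assuming a seconds-precision epoch column (the tests for
// this package exercise the remaining integer and float widths):
//
//	frame := data.NewFrame("", data.NewField("time", nil, []int64{1521062406}))
//	_ = convertSQLTimeColumnToEpochMS(frame, 0)
//	// frame.Fields[0] is now a nullable time.Time column holding 2018-03-14T21:20:06Z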
// convertSQLValueColumnToFloat converts timeseries value column to float.
func convertSQLValueColumnToFloat(frame *data.Frame, Index int) (*data.Frame, error) {
if Index < 0 || Index >= len(frame.Fields) {
return frame, fmt.Errorf("metricIndex %d is out of range", Index)
}
origin := frame.Fields[Index]
valueType := origin.Type()
if valueType == data.FieldTypeFloat64 || valueType == data.FieldTypeNullableFloat64 {
return frame, nil
}
newField := data.NewFieldFromFieldType(data.FieldTypeNullableFloat64, origin.Len())
newField.Name = origin.Name
newField.Labels = origin.Labels
for i := 0; i < origin.Len(); i++ {
v, err := origin.NullableFloatAt(i)
if err != nil {
return frame, err
}
newField.Set(i, v)
}
frame.Fields[Index] = newField
return frame, nil
}
// epochPrecisionToMS converts epoch precision to milliseconds, if needed.
// Seconds and nanoseconds to milliseconds are supported right now.
func epochPrecisionToMS(value float64) float64 {
s := strconv.FormatFloat(value, 'e', -1, 64)
if strings.HasSuffix(s, "e+09") {
return value * float64(1e3)
}
if strings.HasSuffix(s, "e+18") {
return value / float64(time.Millisecond)
}
return value
}
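// Worked examples of the scaling above:
//
//	epochPrecisionToMS(1.5e9)  // seconds     -> 1.5e12 (ms)
//	epochPrecisionToMS(1.5e18) // nanoseconds -> 1.5e12 (ms)
//	epochPrecisionToMS(1.5e12) // already ms  -> unchanged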
func isDownstreamError(err error) bool {
if backend.IsDownstreamError(err) {
return true
}
resultProcessingDownstreamErrors := []error{
data.ErrorInputFieldsWithoutRows,
data.ErrorSeriesUnsorted,
data.ErrorNullTimeValues,
}
for _, e := range resultProcessingDownstreamErrors {
if errors.Is(err, e) {
return true
}
}
return false
}
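// A quick sketch of the classification, assuming the SDK treats plain errors
// as non-downstream by default:
//
//	isDownstreamError(data.ErrorSeriesUnsorted)                            // true
//	isDownstreamError(fmt.Errorf("wrapped: %w", data.ErrorNullTimeValues)) // true
//	isDownstreamError(errors.New("plugin bug"))                            // false (assumption)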

View File

@ -0,0 +1,681 @@
package pgx
import (
"fmt"
"net"
"testing"
"time"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
)
func Pointer[T any](v T) *T { return &v }
func TestSQLEngine(t *testing.T) {
dt := time.Date(2018, 3, 14, 21, 20, 6, int(527345*time.Microsecond), time.UTC)
t.Run("Handle interpolating $__interval and $__interval_ms", func(t *testing.T) {
from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
to := from.Add(5 * time.Minute)
timeRange := backend.TimeRange{From: from, To: to}
text := "$__interval $__timeGroupAlias(time,$__interval) $__interval_ms"
t.Run("interpolate 10 minutes $__interval", func(t *testing.T) {
query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Minute * 10}
sql := Interpolate(query, timeRange, "", text)
require.Equal(t, "10m $__timeGroupAlias(time,10m) 600000", sql)
})
t.Run("interpolate 4seconds $__interval", func(t *testing.T) {
query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 4}
sql := Interpolate(query, timeRange, "", text)
require.Equal(t, "4s $__timeGroupAlias(time,4s) 4000", sql)
})
t.Run("interpolate 200 milliseconds $__interval", func(t *testing.T) {
query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Millisecond * 200}
sql := Interpolate(query, timeRange, "", text)
require.Equal(t, "200ms $__timeGroupAlias(time,200ms) 200", sql)
})
})
t.Run("Given a time range between 2018-04-12 00:00 and 2018-04-12 00:05", func(t *testing.T) {
from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
to := from.Add(5 * time.Minute)
timeRange := backend.TimeRange{From: from, To: to}
query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 60}
t.Run("interpolate __unixEpochFrom function", func(t *testing.T) {
sql := Interpolate(query, timeRange, "", "select $__unixEpochFrom()")
require.Equal(t, fmt.Sprintf("select %d", from.Unix()), sql)
})
t.Run("interpolate __unixEpochTo function", func(t *testing.T) {
sql := Interpolate(query, timeRange, "", "select $__unixEpochTo()")
require.Equal(t, fmt.Sprintf("select %d", to.Unix()), sql)
})
})
t.Run("Given row values with int64 as time columns", func(t *testing.T) {
tSeconds := dt.Unix()
tMilliseconds := dt.UnixNano() / 1e6
tNanoSeconds := dt.UnixNano()
var nilPointer *int64
originFrame := data.NewFrame("",
data.NewField("time1", nil, []int64{
tSeconds,
}),
data.NewField("time2", nil, []*int64{
Pointer(tSeconds),
}),
data.NewField("time3", nil, []int64{
tMilliseconds,
}),
data.NewField("time4", nil, []*int64{
Pointer(tMilliseconds),
}),
data.NewField("time5", nil, []int64{
tNanoSeconds,
}),
data.NewField("time6", nil, []*int64{
Pointer(tNanoSeconds),
}),
data.NewField("time7", nil, []*int64{
nilPointer,
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
err := convertSQLTimeColumnToEpochMS(originFrame, i)
require.NoError(t, err)
}
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
require.Nil(t, originFrame.Fields[6].At(0))
})
t.Run("Given row values with uint64 as time columns", func(t *testing.T) {
tSeconds := uint64(dt.Unix())
tMilliseconds := uint64(dt.UnixNano() / 1e6)
tNanoSeconds := uint64(dt.UnixNano())
var nilPointer *uint64
originFrame := data.NewFrame("",
data.NewField("time1", nil, []uint64{
tSeconds,
}),
data.NewField("time2", nil, []*uint64{
Pointer(tSeconds),
}),
data.NewField("time3", nil, []uint64{
tMilliseconds,
}),
data.NewField("time4", nil, []*uint64{
Pointer(tMilliseconds),
}),
data.NewField("time5", nil, []uint64{
tNanoSeconds,
}),
data.NewField("time6", nil, []*uint64{
Pointer(tNanoSeconds),
}),
data.NewField("time7", nil, []*uint64{
nilPointer,
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
err := convertSQLTimeColumnToEpochMS(originFrame, i)
require.NoError(t, err)
}
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
require.Nil(t, originFrame.Fields[6].At(0))
})
t.Run("Given row values with int32 as time columns", func(t *testing.T) {
tSeconds := int32(dt.Unix())
var nilInt *int32
originFrame := data.NewFrame("",
data.NewField("time1", nil, []int32{
tSeconds,
}),
data.NewField("time2", nil, []*int32{
Pointer(tSeconds),
}),
data.NewField("time7", nil, []*int32{
nilInt,
}),
)
for i := 0; i < 3; i++ {
err := convertSQLTimeColumnToEpochMS(originFrame, i)
require.NoError(t, err)
}
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
require.Nil(t, originFrame.Fields[2].At(0))
})
t.Run("Given row values with uint32 as time columns", func(t *testing.T) {
tSeconds := uint32(dt.Unix())
var nilInt *uint32
originFrame := data.NewFrame("",
data.NewField("time1", nil, []uint32{
tSeconds,
}),
data.NewField("time2", nil, []*uint32{
Pointer(tSeconds),
}),
data.NewField("time7", nil, []*uint32{
nilInt,
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
err := convertSQLTimeColumnToEpochMS(originFrame, i)
require.NoError(t, err)
}
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
require.Nil(t, originFrame.Fields[2].At(0))
})
t.Run("Given row values with float64 as time columns", func(t *testing.T) {
tSeconds := float64(dt.UnixNano()) / float64(time.Second)
tMilliseconds := float64(dt.UnixNano()) / float64(time.Millisecond)
tNanoSeconds := float64(dt.UnixNano())
var nilPointer *float64
originFrame := data.NewFrame("",
data.NewField("time1", nil, []float64{
tSeconds,
}),
data.NewField("time2", nil, []*float64{
Pointer(tSeconds),
}),
data.NewField("time3", nil, []float64{
tMilliseconds,
}),
data.NewField("time4", nil, []*float64{
Pointer(tMilliseconds),
}),
data.NewField("time5", nil, []float64{
tNanoSeconds,
}),
data.NewField("time6", nil, []*float64{
Pointer(tNanoSeconds),
}),
data.NewField("time7", nil, []*float64{
nilPointer,
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
err := convertSQLTimeColumnToEpochMS(originFrame, i)
require.NoError(t, err)
}
require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
require.Nil(t, originFrame.Fields[6].At(0))
})
t.Run("Given row values with float32 as time columns", func(t *testing.T) {
tSeconds := float32(dt.Unix())
var nilInt *float32
originFrame := data.NewFrame("",
data.NewField("time1", nil, []float32{
tSeconds,
}),
data.NewField("time2", nil, []*float32{
Pointer(tSeconds),
}),
data.NewField("time7", nil, []*float32{
nilInt,
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
err := convertSQLTimeColumnToEpochMS(originFrame, i)
require.NoError(t, err)
}
require.Equal(t, int64(tSeconds), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
require.Equal(t, int64(tSeconds), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
require.Nil(t, originFrame.Fields[2].At(0))
})
t.Run("Given row with value columns, would be converted to float64", func(t *testing.T) {
originFrame := data.NewFrame("",
data.NewField("value1", nil, []int64{
int64(1),
}),
data.NewField("value2", nil, []*int64{
Pointer(int64(1)),
}),
data.NewField("value3", nil, []int32{
int32(1),
}),
data.NewField("value4", nil, []*int32{
Pointer(int32(1)),
}),
data.NewField("value5", nil, []int16{
int16(1),
}),
data.NewField("value6", nil, []*int16{
Pointer(int16(1)),
}),
data.NewField("value7", nil, []int8{
int8(1),
}),
data.NewField("value8", nil, []*int8{
Pointer(int8(1)),
}),
data.NewField("value9", nil, []float64{
float64(1),
}),
data.NewField("value10", nil, []*float64{
Pointer(1.0),
}),
data.NewField("value11", nil, []float32{
float32(1),
}),
data.NewField("value12", nil, []*float32{
Pointer(float32(1)),
}),
data.NewField("value13", nil, []uint64{
uint64(1),
}),
data.NewField("value14", nil, []*uint64{
Pointer(uint64(1)),
}),
data.NewField("value15", nil, []uint32{
uint32(1),
}),
data.NewField("value16", nil, []*uint32{
Pointer(uint32(1)),
}),
data.NewField("value17", nil, []uint16{
uint16(1),
}),
data.NewField("value18", nil, []*uint16{
Pointer(uint16(1)),
}),
data.NewField("value19", nil, []uint8{
uint8(1),
}),
data.NewField("value20", nil, []*uint8{
Pointer(uint8(1)),
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
_, err := convertSQLValueColumnToFloat(originFrame, i)
require.NoError(t, err)
if i == 8 {
require.Equal(t, float64(1), originFrame.Fields[i].At(0).(float64))
} else {
require.NotNil(t, originFrame.Fields[i].At(0).(*float64))
require.Equal(t, float64(1), *originFrame.Fields[i].At(0).(*float64))
}
}
})
t.Run("Given row with nil value columns", func(t *testing.T) {
var int64NilPointer *int64
var int32NilPointer *int32
var int16NilPointer *int16
var int8NilPointer *int8
var float64NilPointer *float64
var float32NilPointer *float32
var uint64NilPointer *uint64
var uint32NilPointer *uint32
var uint16NilPointer *uint16
var uint8NilPointer *uint8
originFrame := data.NewFrame("",
data.NewField("value1", nil, []*int64{
int64NilPointer,
}),
data.NewField("value2", nil, []*int32{
int32NilPointer,
}),
data.NewField("value3", nil, []*int16{
int16NilPointer,
}),
data.NewField("value4", nil, []*int8{
int8NilPointer,
}),
data.NewField("value5", nil, []*float64{
float64NilPointer,
}),
data.NewField("value6", nil, []*float32{
float32NilPointer,
}),
data.NewField("value7", nil, []*uint64{
uint64NilPointer,
}),
data.NewField("value8", nil, []*uint32{
uint32NilPointer,
}),
data.NewField("value9", nil, []*uint16{
uint16NilPointer,
}),
data.NewField("value10", nil, []*uint8{
uint8NilPointer,
}),
)
for i := 0; i < len(originFrame.Fields); i++ {
t.Run("", func(t *testing.T) {
_, err := convertSQLValueColumnToFloat(originFrame, i)
require.NoError(t, err)
require.Nil(t, originFrame.Fields[i].At(0))
})
}
})
t.Run("Should not return raw connection errors", func(t *testing.T) {
err := net.OpError{Op: "Dial", Err: fmt.Errorf("inner-error")}
transformer := &testQueryResultTransformer{}
dp := DataSourceHandler{
log: backend.NewLoggerWith("logger", "test"),
queryResultTransformer: transformer,
}
resultErr := dp.TransformQueryError(dp.log, &err)
assert.False(t, transformer.transformQueryErrorWasCalled)
errorText := resultErr.Error()
assert.NotEqual(t, err, resultErr)
assert.NotContains(t, errorText, "inner-error")
assert.Contains(t, errorText, "failed to connect to server")
})
t.Run("Should return non-connection errors unmodified", func(t *testing.T) {
err := fmt.Errorf("normal error")
transformer := &testQueryResultTransformer{}
dp := DataSourceHandler{
log: backend.NewLoggerWith("logger", "test"),
queryResultTransformer: transformer,
}
resultErr := dp.TransformQueryError(dp.log, err)
assert.True(t, transformer.transformQueryErrorWasCalled)
assert.Equal(t, err, resultErr)
assert.ErrorIs(t, err, resultErr)
})
}
func TestConvertResultsToFrame(t *testing.T) {
// These tests exercise convertResultsToFrame directly, using mock pgconn results.
t.Run("convertResultsToFrame with single result", func(t *testing.T) {
// Create mock field descriptions
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
{Name: "name", DataTypeOID: pgtype.TextOID},
{Name: "value", DataTypeOID: pgtype.Float8OID},
}
// Create mock result data
mockRows := [][][]byte{
{[]byte("1"), []byte("test1"), []byte("10.5")},
{[]byte("2"), []byte("test2"), []byte("20.7")},
}
// Create mock result
result := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
result.CommandTag = pgconn.NewCommandTag("SELECT 2")
results := []*pgconn.Result{result}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 3, len(frame.Fields))
require.Equal(t, 2, frame.Rows())
// Verify field names
require.Equal(t, "id", frame.Fields[0].Name)
require.Equal(t, "name", frame.Fields[1].Name)
require.Equal(t, "value", frame.Fields[2].Name)
})
t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) {
// Create mock field descriptions (same structure for both results)
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
{Name: "name", DataTypeOID: pgtype.TextOID},
}
// Create first result
mockRows1 := [][][]byte{
{[]byte("1"), []byte("test1")},
{[]byte("2"), []byte("test2")},
}
result1 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows1,
}
result1.CommandTag = pgconn.NewCommandTag("SELECT 2")
// Create second result with same structure
mockRows2 := [][][]byte{
{[]byte("3"), []byte("test3")},
{[]byte("4"), []byte("test4")},
}
result2 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows2,
}
result2.CommandTag = pgconn.NewCommandTag("SELECT 2")
results := []*pgconn.Result{result1, result2}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 2, len(frame.Fields))
require.Equal(t, 4, frame.Rows()) // Should have rows from both results
// Verify field names
require.Equal(t, "id", frame.Fields[0].Name)
require.Equal(t, "name", frame.Fields[1].Name)
})
t.Run("convertResultsToFrame with row limit", func(t *testing.T) {
// Create mock field descriptions
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
}
// Create mock result data with 3 rows
mockRows := [][][]byte{
{[]byte("1")},
{[]byte("2")},
{[]byte("3")},
}
result := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
result.CommandTag = pgconn.NewCommandTag("SELECT 3")
results := []*pgconn.Result{result}
// Set row limit to 2
frame, err := convertResultsToFrame(results, 2)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 1, len(frame.Fields))
require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows
// Should have a notice about the limit
require.NotNil(t, frame.Meta)
require.Len(t, frame.Meta.Notices, 1)
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
})
t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) {
// Create a non-SELECT result (should be skipped)
nonSelectResult := &pgconn.Result{}
nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1")
// Create a SELECT result
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
}
mockRows := [][][]byte{
{[]byte("1")},
}
selectResult := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1")
results := []*pgconn.Result{nonSelectResult, selectResult}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 1, len(frame.Fields))
require.Equal(t, 1, frame.Rows())
})
t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) {
// Create only non-SELECT results
result1 := &pgconn.Result{}
result1.CommandTag = pgconn.NewCommandTag("UPDATE 1")
result2 := &pgconn.Result{}
result2.CommandTag = pgconn.NewCommandTag("INSERT 1")
results := []*pgconn.Result{result1, result2}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 0, len(frame.Fields))
require.Equal(t, 0, frame.Rows())
})
t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) {
// Create mock field descriptions (same structure for both results)
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
}
// Create first result with 3 rows
mockRows1 := [][][]byte{
{[]byte("1")},
{[]byte("2")},
{[]byte("3")},
}
result1 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows1,
}
result1.CommandTag = pgconn.NewCommandTag("SELECT 3")
// Create second result with 3 rows
mockRows2 := [][][]byte{
{[]byte("4")},
{[]byte("5")},
{[]byte("6")},
}
result2 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows2,
}
result2.CommandTag = pgconn.NewCommandTag("SELECT 3")
results := []*pgconn.Result{result1, result2}
// Set row limit to 2 (should limit each result to 2 rows)
frame, err := convertResultsToFrame(results, 2)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 1, len(frame.Fields))
require.Equal(t, 4, frame.Rows()) // 2 rows from each result
// Should have notices about the limit from both results
require.NotNil(t, frame.Meta)
require.Len(t, frame.Meta.Notices, 2)
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2")
})
t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) {
// Create mock field descriptions
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
{Name: "name", DataTypeOID: pgtype.TextOID},
}
// Create mock result data with null values
mockRows := [][][]byte{
{[]byte("1"), nil}, // null name
{nil, []byte("test2")}, // null id
}
result := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
result.CommandTag = pgconn.NewCommandTag("SELECT 2")
results := []*pgconn.Result{result}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 2, len(frame.Fields))
require.Equal(t, 2, frame.Rows())
// Check that null values are handled correctly
// The exact representation depends on the field type, but should not panic
require.NotPanics(t, func() {
frame.Fields[0].At(1) // null id
frame.Fields[1].At(0) // null name
})
})
}
type testQueryResultTransformer struct {
transformQueryErrorWasCalled bool
}
func (t *testQueryResultTransformer) TransformQueryError(_ log.Logger, err error) error {
t.transformQueryErrorWasCalled = true
return err
}
func (t *testQueryResultTransformer) GetConverterList() []sqlutil.StringConverter {
return nil
}

View File

@ -16,56 +16,14 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/lib/pq"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx"
"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
)
func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service {
logger := backend.NewLoggerWith("logger", "tsdb.postgres")
s := &Service{
tlsManager: newTLSManager(logger, cfg.DataPath),
pgxTlsManager: newPgxTlsManager(logger),
logger: logger,
features: features,
}
s.im = datasource.NewInstanceManager(s.newInstanceSettings())
return s
}
type Service struct {
tlsManager tlsSettingsProvider
pgxTlsManager *pgxTlsManager
im instancemgmt.InstanceManager
logger log.Logger
features featuremgmt.FeatureToggles
}
func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) {
i, err := s.im.Get(ctx, pluginCtx)
if err != nil {
return nil, err
}
instance := i.(*sqleng.DataSourceHandler)
return instance, nil
}
func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
dsInfo, err := s.getDSInfo(ctx, req.PluginContext)
if err != nil {
return nil, err
}
if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
return dsInfo.QueryDataPGX(ctx, req)
}
return dsInfo.QueryData(ctx, req)
}
func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*sql.DB, *sqleng.DataSourceHandler, error) {
connector, err := pq.NewConnector(cnnstr)
if err != nil {
@ -115,7 +73,7 @@ func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit in
return db, handler, nil
}
func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqleng.DataSourceHandler, error) {
func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqlengpgx.DataSourceHandler, error) {
pgxConf, err := pgxpool.ParseConfig(cnnstr)
if err != nil {
logger.Error("postgres config creation failed", "error", err)
@ -144,7 +102,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit
return []string{host}, nil
}
config := sqleng.DataPluginConfiguration{
config := sqlengpgx.DataPluginConfiguration{
DSInfo: dsInfo,
MetricColumnTypes: []string{"unknown", "text", "varchar", "char", "bpchar"},
RowLimit: rowLimit,
@ -160,7 +118,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit
return nil, nil, err
}
handler, err := sqleng.NewQueryDataHandlerPGX(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb),
handler, err := sqlengpgx.NewQueryDataHandler(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb),
logger)
if err != nil {
logger.Error("Failed connecting to Postgres", "err", err)
@ -171,8 +129,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit
return p, handler, nil
}
func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc {
logger := s.logger
func NewInstanceSettings(logger log.Logger, features featuremgmt.FeatureToggles, dataPath string) datasource.InstanceFactoryFunc {
return func(ctx context.Context, settings backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
cfg := backend.GrafanaConfigFromContext(ctx)
sqlCfg, err := cfg.SQL()
@ -210,49 +167,53 @@ func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc {
DecryptedSecureJSONData: settings.DecryptedSecureJSONData,
}
isPGX := s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX)
isPGX := features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX)
userFacingDefaultError, err := cfg.UserFacingDefaultError()
if err != nil {
return nil, err
}
var handler instancemgmt.Instance
if isPGX {
pgxTlsSettings, err := s.pgxTlsManager.getTLSSettings(dsInfo)
pgxlogger := logger.FromContext(ctx).With("driver", "pgx")
pgxTlsManager := newPgxTlsManager(pgxlogger)
pgxTlsSettings, err := pgxTlsManager.getTLSSettings(dsInfo)
if err != nil {
return "", err
}
// Ensure cleanupCertFiles is called after the connection is opened
defer s.pgxTlsManager.cleanupCertFiles(pgxTlsSettings)
cnnstr, err := s.generateConnectionString(dsInfo, pgxTlsSettings, isPGX)
defer pgxTlsManager.cleanupCertFiles(pgxTlsSettings)
cnnstr, err := generateConnectionString(dsInfo, pgxTlsSettings, isPGX, pgxlogger)
if err != nil {
return "", err
}
_, handler, err = newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings)
_, handler, err := newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pgxlogger, settings)
if err != nil {
logger.Error("Failed connecting to Postgres", "err", err)
pgxlogger.Error("Failed connecting to Postgres", "err", err)
return nil, err
}
} else {
tlsSettings, err := s.tlsManager.getTLSSettings(dsInfo)
if err != nil {
return "", err
}
cnnstr, err := s.generateConnectionString(dsInfo, tlsSettings, isPGX)
if err != nil {
return nil, err
}
_, handler, err = newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings)
if err != nil {
logger.Error("Failed connecting to Postgres", "err", err)
return nil, err
}
}
logger.Debug("Successfully connected to Postgres")
pgxlogger.Debug("Successfully connected to Postgres")
return handler, nil
} else {
pqlogger := logger.FromContext(ctx).With("driver", "libpq")
tlsManager := newTLSManager(pqlogger, dataPath)
tlsSettings, err := tlsManager.getTLSSettings(dsInfo)
if err != nil {
return "", err
}
cnnstr, err := generateConnectionString(dsInfo, tlsSettings, isPGX, pqlogger)
if err != nil {
return nil, err
}
_, handler, err := newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pqlogger, settings)
if err != nil {
pqlogger.Error("Failed connecting to Postgres", "err", err)
return nil, err
}
pqlogger.Debug("Successfully connected to Postgres")
return handler, nil
}
}
}
@ -342,9 +303,7 @@ func buildBaseConnectionString(params connectionParams) string {
return connStr
}
func (s *Service) generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool) (string, error) {
logger := s.logger
func generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool, logger log.Logger) (string, error) {
params, err := parseConnectionParams(dsInfo, logger)
if err != nil {
return "", err
@ -387,15 +346,6 @@ func (t *postgresQueryResultTransformer) TransformQueryError(_ log.Logger, err e
return err
}
// CheckHealth pings the connected SQL database
func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
dsHandler, err := s.getDSInfo(ctx, req.PluginContext)
if err != nil {
return sqleng.ErrToHealthCheckResult(err)
}
return dsHandler.CheckHealth(ctx, req, s.features)
}
func (t *postgresQueryResultTransformer) GetConverterList() []sqlutil.StringConverter {
return []sqlutil.StringConverter{
{

View File

@ -186,7 +186,7 @@ func TestIntegrationPostgresPGXSnapshots(t *testing.T) {
query := makeQuery(rawSQL, test.format)
result, err := handler.QueryDataPGX(context.Background(), &query)
result, err := handler.QueryData(context.Background(), &query)
require.Len(t, result.Responses, 1)
response, found := result.Responses["A"]
require.True(t, found)

View File

@ -151,10 +151,6 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) {
}
for _, tt := range testCases {
t.Run(tt.desc, func(t *testing.T) {
svc := Service{
logger: backend.NewLoggerWith("logger", "tsdb.postgres"),
}
ds := sqleng.DataSourceInfo{
URL: tt.host,
User: tt.user,
@ -162,8 +158,9 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) {
Database: tt.database,
UID: tt.uid,
}
logger := backend.NewLoggerWith("logger", "tsdb.postgres")
connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false)
connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger)
if tt.expErr == "" {
require.NoError(t, err, tt.desc)
@ -284,7 +281,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -383,7 +380,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -426,7 +423,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -460,7 +457,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
frames := queryResult.Frames
@ -488,7 +485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -542,7 +539,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -589,7 +586,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -624,7 +621,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -741,7 +738,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -765,7 +762,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -789,7 +786,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -813,7 +810,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -837,7 +834,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -861,7 +858,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -885,7 +882,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -910,7 +907,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -934,7 +931,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -959,7 +956,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -991,7 +988,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1026,7 +1023,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1086,7 +1083,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["Deploys"]
@ -1113,7 +1110,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["Tickets"]
@ -1136,7 +1133,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1161,7 +1158,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1186,7 +1183,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1212,7 +1209,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1238,7 +1235,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1264,7 +1261,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1290,7 +1287,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1338,7 +1335,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := handler.QueryDataPGX(t.Context(), query)
resp, err := handler.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1368,7 +1365,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := handler.QueryDataPGX(t.Context(), query)
resp, err := handler.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1406,7 +1403,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},
}
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
@ -1453,7 +1450,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}
// This should not panic and should work correctly
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)
@ -1488,7 +1485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}
// This should not panic anymore, but should return an error instead
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
@ -1517,7 +1514,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}
// This should not panic anymore, but should return an error instead
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
@ -1546,7 +1543,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}
// This should not panic
resp, err := exe.QueryDataPGX(t.Context(), query)
resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)
queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)

View File

@ -0,0 +1,80 @@
package postgres
import (
"context"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/datasource"
"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx"
"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
)
type Service struct {
im instancemgmt.InstanceManager
features featuremgmt.FeatureToggles
}
func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service {
logger := backend.NewLoggerWith("logger", "tsdb.postgres")
s := &Service{
im: datasource.NewInstanceManager(NewInstanceSettings(logger, features, cfg.DataPath)),
features: features,
}
return s
}
// NOTE: do not put any business logic into this method. Its whole job is to forward the call "inside"
func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
dsHandler, err := s.getDSInfoPGX(ctx, req.PluginContext)
if err != nil {
return sqlengpgx.ErrToHealthCheckResult(err)
}
return dsHandler.CheckHealth(ctx, req)
} else {
dsHandler, err := s.getDSInfo(ctx, req.PluginContext)
if err != nil {
return sqleng.ErrToHealthCheckResult(err)
}
return dsHandler.CheckHealth(ctx, req)
}
}
// NOTE: do not put any business logic into this method. Its whole job is to forward the call "inside"
func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
dsInfo, err := s.getDSInfoPGX(ctx, req.PluginContext)
if err != nil {
return nil, err
}
return dsInfo.QueryData(ctx, req)
} else {
dsInfo, err := s.getDSInfo(ctx, req.PluginContext)
if err != nil {
return nil, err
}
return dsInfo.QueryData(ctx, req)
}
}
func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) {
i, err := s.im.Get(ctx, pluginCtx)
if err != nil {
return nil, err
}
instance := i.(*sqleng.DataSourceHandler)
return instance, nil
}
func (s *Service) getDSInfoPGX(ctx context.Context, pluginCtx backend.PluginContext) (*sqlengpgx.DataSourceHandler, error) {
i, err := s.im.Get(ctx, pluginCtx)
if err != nil {
return nil, err
}
instance := i.(*sqlengpgx.DataSourceHandler)
return instance, nil
}
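One sharp edge in `getDSInfo`/`getDSInfoPGX` is the unchecked type assertion: if the cached instance ever held the other handler type (say, after the PGX flag flips between instance creation and retrieval), it would panic. A hedged sketch of a defensive variant; `getHandler` is a hypothetical generic helper, not part of this change:

```go
package postgres

import (
	"context"
	"fmt"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
)

// getHandler fetches the cached instance and asserts its type with the
// ok-form, turning a mismatched cache entry into an error instead of a panic.
func getHandler[T any](ctx context.Context, im instancemgmt.InstanceManager, pluginCtx backend.PluginContext) (T, error) {
	var zero T
	i, err := im.Get(ctx, pluginCtx)
	if err != nil {
		return zero, err
	}
	instance, ok := i.(T)
	if !ok {
		return zero, fmt.Errorf("unexpected instance type %T", i)
	}
	return instance, nil
}
```

Callers would then write `getHandler[*sqleng.DataSourceHandler](ctx, s.im, pluginCtx)` or the PGX equivalent, collapsing the two near-identical getters into one.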

View File

@ -156,10 +156,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) {
}
for _, tt := range testCases {
t.Run(tt.desc, func(t *testing.T) {
svc := Service{
tlsManager: &tlsTestManager{settings: tt.tlsSettings},
logger: backend.NewLoggerWith("logger", "tsdb.postgres"),
}
logger := backend.NewLoggerWith("logger", "tsdb.postgres")
ds := sqleng.DataSourceInfo{
URL: tt.host,
@ -169,7 +166,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) {
UID: tt.uid,
}
connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false)
connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger)
if tt.expErr == "" {
require.NoError(t, err, tt.desc)
@ -1409,14 +1406,6 @@ func genTimeRangeByInterval(from time.Time, duration time.Duration, interval tim
return timeRange
}
type tlsTestManager struct {
settings tlsSettings
}
func (m *tlsTestManager) getTLSSettings(dsInfo sqleng.DataSourceInfo) (tlsSettings, error) {
return m.settings, nil
}
func isTestDbPostgres() bool {
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
return db == "postgres"

View File

@ -10,17 +10,11 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/lib/pq"
)
func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest, features featuremgmt.FeatureToggles) (*backend.CheckHealthResult, error) {
var err error
if features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
err = e.PingPGX(ctx)
} else {
err = e.Ping()
}
func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
err := e.Ping()
if err != nil {
logCheckHealthError(ctx, e.dsInfo, err)
if strings.EqualFold(req.PluginContext.User.Role, "Admin") {
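With the flag check moved up into the Service, the libpq handler's health check reduces to a plain `Ping` plus error mapping. A minimal sketch of that mapping under the SDK's contract (health failures go into the result, not the returned error); `reportHealth` is a hypothetical helper, not the exact code:

```go
package sqleng

import "github.com/grafana/grafana-plugin-sdk-go/backend"

// reportHealth maps a driver ping into the SDK's CheckHealthResult shape.
func reportHealth(ping func() error) (*backend.CheckHealthResult, error) {
	if err := ping(); err != nil {
		// Connection failures are reported in the result, not as a Go error,
		// so Grafana can render them in the datasource settings UI.
		return &backend.CheckHealthResult{
			Status:  backend.HealthStatusError,
			Message: "database connection failed: " + err.Error(),
		}, nil
	}
	return &backend.CheckHealthResult{
		Status:  backend.HealthStatusOk,
		Message: "Database Connection OK",
	}, nil
}
```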

View File

@ -19,7 +19,6 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/jackc/pgx/v5/pgxpool"
)
// MetaKeyExecutedQueryString is the key where the executed query should get stored
@ -89,7 +88,6 @@ type DataSourceHandler struct {
dsInfo DataSourceInfo
rowLimit int64
userError string
pool *pgxpool.Pool
}
type QueryJson struct {
@ -490,7 +488,6 @@ type dataQueryModel struct {
Interval time.Duration
columnNames []string
columnTypes []*sql.ColumnType
columnTypesPGX []string
timeIndex int
timeEndIndex int
metricIndex int

View File

@ -9,8 +9,6 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -427,246 +425,6 @@ func TestSQLEngine(t *testing.T) {
})
}
func TestConvertResultsToFrame(t *testing.T) {
// Import the pgx packages needed for testing
// These imports are included in the main file but need to be accessible for tests
t.Run("convertResultsToFrame with single result", func(t *testing.T) {
// Create mock field descriptions
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
{Name: "name", DataTypeOID: pgtype.TextOID},
{Name: "value", DataTypeOID: pgtype.Float8OID},
}
// Create mock result data
mockRows := [][][]byte{
{[]byte("1"), []byte("test1"), []byte("10.5")},
{[]byte("2"), []byte("test2"), []byte("20.7")},
}
// Create mock result
result := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
result.CommandTag = pgconn.NewCommandTag("SELECT 2")
results := []*pgconn.Result{result}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 3, len(frame.Fields))
require.Equal(t, 2, frame.Rows())
// Verify field names
require.Equal(t, "id", frame.Fields[0].Name)
require.Equal(t, "name", frame.Fields[1].Name)
require.Equal(t, "value", frame.Fields[2].Name)
})
t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) {
// Create mock field descriptions (same structure for both results)
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
{Name: "name", DataTypeOID: pgtype.TextOID},
}
// Create first result
mockRows1 := [][][]byte{
{[]byte("1"), []byte("test1")},
{[]byte("2"), []byte("test2")},
}
result1 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows1,
}
result1.CommandTag = pgconn.NewCommandTag("SELECT 2")
// Create second result with same structure
mockRows2 := [][][]byte{
{[]byte("3"), []byte("test3")},
{[]byte("4"), []byte("test4")},
}
result2 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows2,
}
result2.CommandTag = pgconn.NewCommandTag("SELECT 2")
results := []*pgconn.Result{result1, result2}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 2, len(frame.Fields))
require.Equal(t, 4, frame.Rows()) // Should have rows from both results
// Verify field names
require.Equal(t, "id", frame.Fields[0].Name)
require.Equal(t, "name", frame.Fields[1].Name)
})
t.Run("convertResultsToFrame with row limit", func(t *testing.T) {
// Create mock field descriptions
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
}
// Create mock result data with 3 rows
mockRows := [][][]byte{
{[]byte("1")},
{[]byte("2")},
{[]byte("3")},
}
result := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
result.CommandTag = pgconn.NewCommandTag("SELECT 3")
results := []*pgconn.Result{result}
// Set row limit to 2
frame, err := convertResultsToFrame(results, 2)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 1, len(frame.Fields))
require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows
// Should have a notice about the limit
require.NotNil(t, frame.Meta)
require.Len(t, frame.Meta.Notices, 1)
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
})
t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) {
// Create a non-SELECT result (should be skipped)
nonSelectResult := &pgconn.Result{}
nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1")
// Create a SELECT result
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
}
mockRows := [][][]byte{
{[]byte("1")},
}
selectResult := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1")
results := []*pgconn.Result{nonSelectResult, selectResult}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 1, len(frame.Fields))
require.Equal(t, 1, frame.Rows())
})
t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) {
// Create only non-SELECT results
result1 := &pgconn.Result{}
result1.CommandTag = pgconn.NewCommandTag("UPDATE 1")
result2 := &pgconn.Result{}
result2.CommandTag = pgconn.NewCommandTag("INSERT 1")
results := []*pgconn.Result{result1, result2}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 0, len(frame.Fields))
require.Equal(t, 0, frame.Rows())
})
t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) {
// Create mock field descriptions (same structure for both results)
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
}
// Create first result with 3 rows
mockRows1 := [][][]byte{
{[]byte("1")},
{[]byte("2")},
{[]byte("3")},
}
result1 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows1,
}
result1.CommandTag = pgconn.NewCommandTag("SELECT 3")
// Create second result with 3 rows
mockRows2 := [][][]byte{
{[]byte("4")},
{[]byte("5")},
{[]byte("6")},
}
result2 := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows2,
}
result2.CommandTag = pgconn.NewCommandTag("SELECT 3")
results := []*pgconn.Result{result1, result2}
// Set row limit to 2 (should limit each result to 2 rows)
frame, err := convertResultsToFrame(results, 2)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 1, len(frame.Fields))
require.Equal(t, 4, frame.Rows()) // 2 rows from each result
// Should have notices about the limit from both results
require.NotNil(t, frame.Meta)
require.Len(t, frame.Meta.Notices, 2)
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2")
})
t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) {
// Create mock field descriptions
fieldDescs := []pgconn.FieldDescription{
{Name: "id", DataTypeOID: pgtype.Int4OID},
{Name: "name", DataTypeOID: pgtype.TextOID},
}
// Create mock result data with null values
mockRows := [][][]byte{
{[]byte("1"), nil}, // null name
{nil, []byte("test2")}, // null id
}
result := &pgconn.Result{
FieldDescriptions: fieldDescs,
Rows: mockRows,
}
result.CommandTag = pgconn.NewCommandTag("SELECT 2")
results := []*pgconn.Result{result}
frame, err := convertResultsToFrame(results, 1000)
require.NoError(t, err)
require.NotNil(t, frame)
require.Equal(t, 2, len(frame.Fields))
require.Equal(t, 2, frame.Rows())
// Check that null values are handled correctly
// The exact representation depends on the field type, but should not panic
require.NotPanics(t, func() {
frame.Fields[0].At(1) // null id
frame.Fields[1].At(0) // null name
})
})
}
type testQueryResultTransformer struct {
transformQueryErrorWasCalled bool
}

View File

@ -0,0 +1,25 @@
package main
import (
"os"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/datasource"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
postgres "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource"
)
func main() {
// No need to pass a logger name; it will be set by the plugin SDK
logger := backend.NewLoggerWith()
// TODO: get rid of setting.NewCfg() and featuremgmt.FeatureToggles once PostgresDSUsePGX is removed
cfg := setting.NewCfg()
// Enable the PGX feature toggle when running as a standalone API server plugin
features := featuremgmt.WithFeatures(featuremgmt.FlagPostgresDSUsePGX)
if err := datasource.Manage("grafana-postgresql-datasource", postgres.NewInstanceSettings(logger, features, cfg.DataPath), datasource.ManageOpts{}); err != nil {
log.DefaultLogger.Error(err.Error())
os.Exit(1)
}
}

View File

@ -294,7 +294,13 @@ func (s *Service) handleFunctions(ctx context.Context, dsInfo *datasourceInfo, _
_, rawBody, statusCode, err := doGraphiteRequest[map[string]any](ctx, dsInfo, s.logger, req, true)
if err != nil {
return nil, statusCode, fmt.Errorf("version request failed: %v", err)
return nil, statusCode, fmt.Errorf("functions request failed: %v", err)
}
// It's possible that an HTML response is returned
// This isn't valid JSON, so we return an error and fall back to the default functions
if strings.HasPrefix(string(*rawBody), "<") {
return []byte{}, http.StatusNotAcceptable, fmt.Errorf("invalid functions response received from Graphite")
}
if rawBody == nil {
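The HTML check relies on the fact that a JSON functions payload can never start with `<`, while Graphite's composer UI pages always do. A small hedged sketch of the same guard (slightly more tolerant, trimming leading whitespace first); `looksLikeHTML` and `validateFunctionsBody` are hypothetical helpers:

```go
package graphite

import (
	"bytes"
	"fmt"
)

// looksLikeHTML reports whether the body appears to be an HTML page rather
// than JSON; valid JSON never starts with '<'.
func looksLikeHTML(body []byte) bool {
	return bytes.HasPrefix(bytes.TrimSpace(body), []byte("<"))
}

func validateFunctionsBody(body []byte) error {
	if looksLikeHTML(body) {
		return fmt.Errorf("invalid functions response received from Graphite")
	}
	return nil
}
```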

View File

@ -735,21 +735,41 @@ func TestHandleFunctions(t *testing.T) {
responseBody: `{"error": "internal error"}`,
statusCode: 500,
expectError: true,
errorContains: "version request failed",
errorContains: "functions request failed",
},
{
name: "functions request not found",
responseBody: `{"error": "not found"}`,
statusCode: 404,
expectError: true,
errorContains: "version request failed",
errorContains: "functions request failed",
},
{
name: "network error",
responseBody: "",
statusCode: 0,
expectError: true,
errorContains: "version request failed",
errorContains: "functions request failed",
},
{
name: "html response",
responseBody: `<html>
<head>
<title>Graphite Browser</title>
</head>
<frameset rows="60,*" frameborder="1" border="1">
<frame src="/browser/header/" name="Header" id='header' scrolling="no" noresize="true" />
<frame src="/composer/?" name="content" id="composerFrame"/>
</frameset>
</html>
`,
statusCode: 200,
expectError: true,
errorContains: "invalid functions response received from Graphite",
},
}

View File

@ -67,7 +67,7 @@ func convertSQLite3URL(dsn string) (string, error) {
newDSN := dsn[:pos]
q := url.Values{}
q.Add("_pragma", "busy_timeout(5000)")
q.Add("_pragma", "busy_timeout(7500)") // Default of mattn/go-sqlite3 is 5s but we increase it to 7.5s to try and avoid busy errors.
for key, values := range params {
if alias, ok := dsnAlias[strings.ToLower(key)]; ok {
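For reference, `url.Values` carries repeated `_pragma` keys, so each `Add` becomes one more pragma in the rewritten DSN. A runnable sketch of how the pragmas end up encoded (the database path and the second pragma are illustrative only):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("_pragma", "busy_timeout(7500)")
	q.Add("_pragma", "journal_mode(wal)") // hypothetical extra pragma

	// Encode() preserves the order of values under the same key.
	fmt.Println("file:grafana.db?" + q.Encode())
	// Output: file:grafana.db?_pragma=busy_timeout%287500%29&_pragma=journal_mode%28wal%29
}
```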

View File

@ -7,7 +7,9 @@ import { Button, Drawer, Dropdown, Icon, Menu, MenuItem } from '@grafana/ui';
import { Permissions } from 'app/core/components/AccessControl';
import { appEvents } from 'app/core/core';
import { RepoType } from 'app/features/provisioning/Wizard/types';
import { BulkMoveProvisionedResource } from 'app/features/provisioning/components/BulkActions/BulkMoveProvisionedResource';
import { DeleteProvisionedFolderForm } from 'app/features/provisioning/components/Folders/DeleteProvisionedFolderForm';
import { useIsProvisionedInstance } from 'app/features/provisioning/hooks/useIsProvisionedInstance';
import { getReadOnlyTooltipText } from 'app/features/provisioning/utils/repository';
import { ShowModalReactEvent } from 'app/types/events';
import { FolderDTO } from 'app/types/folders';
@ -29,14 +31,18 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
const [isOpen, setIsOpen] = useState(false);
const [showPermissionsDrawer, setShowPermissionsDrawer] = useState(false);
const [showDeleteProvisionedFolderDrawer, setShowDeleteProvisionedFolderDrawer] = useState(false);
const [showMoveProvisionedFolderDrawer, setShowMoveProvisionedFolderDrawer] = useState(false);
const [moveFolder] = useMoveFolderMutationFacade();
const isProvisionedInstance = useIsProvisionedInstance();
const deleteFolder = useDeleteFolderMutationFacade();
const { canEditFolders, canDeleteFolders, canViewPermissions, canSetPermissions } = getFolderPermissions(folder);
const isProvisionedFolder = folder.managedBy === ManagerKind.Repo;
// When a single folder is provisioned (not the whole instance), its root repository folder cannot be moved
const isProvisionedRootFolder = isProvisionedFolder && !isProvisionedInstance && folder.parentUid === undefined;
// Folders can be moved unless they are the root folder of a provisioned repository
const canMoveFolder = canEditFolders && !isProvisionedFolder;
const canMoveFolder = canEditFolders && !isProvisionedRootFolder;
const onMove = async (destinationUID: string) => {
await moveFolder({ folderUID: folder.uid, destinationUID: destinationUID });
@ -115,6 +121,10 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
setShowDeleteProvisionedFolderDrawer(true);
};
const handleShowMoveProvisionedFolderDrawer = () => {
setShowMoveProvisionedFolderDrawer(true);
};
const managePermissionsLabel = t('browse-dashboards.folder-actions-button.manage-permissions', 'Manage permissions');
const moveLabel = t('browse-dashboards.folder-actions-button.move', 'Move');
const deleteLabel = t('browse-dashboards.folder-actions-button.delete', 'Delete');
@ -122,7 +132,12 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
const menu = (
<Menu>
{canViewPermissions && <MenuItem onClick={() => setShowPermissionsDrawer(true)} label={managePermissionsLabel} />}
{canMoveFolder && !isReadOnlyRepo && <MenuItem onClick={showMoveModal} label={moveLabel} />}
{canMoveFolder && !isReadOnlyRepo && (
<MenuItem
onClick={isProvisionedFolder ? handleShowMoveProvisionedFolderDrawer : showMoveModal}
label={moveLabel}
/>
)}
{canDeleteFolders && !isReadOnlyRepo && (
<MenuItem
destructive
@ -175,6 +190,19 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
/>
</Drawer>
)}
{showMoveProvisionedFolderDrawer && (
<Drawer
title={t('browse-dashboards.action.move-provisioned-folder', 'Move provisioned folder')}
subtitle={folder.title}
onClose={() => setShowMoveProvisionedFolderDrawer(false)}
>
<BulkMoveProvisionedResource
folderUid={folder.uid}
selectedItems={{ dashboard: {}, folder: { [folder.uid]: true } }}
onDismiss={() => setShowMoveProvisionedFolderDrawer(false)}
/>
</Drawer>
)}
</>
);
}

View File

@ -4,6 +4,7 @@ import { PropsWithChildren } from 'react';
import { CoreApp, DataQueryRequest, dateTime, LoadingState, PanelData, toDataFrame } from '@grafana/data';
import { DataQuery } from '@grafana/schema';
import { mockDataSource } from 'app/features/alerting/unified/mocks';
import { ExpressionDatasourceUID } from 'app/features/expressions/types';
import { filterPanelDataToQuery, Props, QueryEditorRow } from './QueryEditorRow';
@ -464,5 +465,28 @@ describe('QueryEditorRow', () => {
expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument();
});
});
it('should not render saved queries buttons when query is an expression query', async () => {
const expressionQuery = {
refId: 'B',
datasource: {
uid: ExpressionDatasourceUID,
type: '__expr__',
},
};
const expressionProps = {
...props(testData),
query: expressionQuery,
queries: [expressionQuery],
};
render(<QueryEditorRow {...expressionProps} />);
await waitFor(() => {
expect(screen.queryByText('Save query')).not.toBeInTheDocument();
expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument();
});
});
});
});

View File

@ -34,6 +34,7 @@ import {
} from 'app/core/components/QueryOperationRow/QueryOperationRow';
import { useQueryLibraryContext } from '../../explore/QueryLibrary/QueryLibraryContext';
import { ExpressionDatasourceUID } from '../../expressions/types';
import { QueryActionComponent, RowActionComponents } from './QueryActionComponent';
import { QueryEditorRowHeader } from './QueryEditorRowHeader';
@ -386,10 +387,11 @@ export class QueryEditorRow<TQuery extends DataQuery> extends PureComponent<Prop
const hasEditorHelp = datasource?.components?.QueryEditorHelp;
const isEditingQueryLibrary = queryLibraryRef !== undefined;
const isUnifiedAlerting = app === CoreApp.UnifiedAlerting;
const isExpressionQuery = query.datasource?.uid === ExpressionDatasourceUID;
return (
<>
{!isEditingQueryLibrary && !isUnifiedAlerting && (
{!isEditingQueryLibrary && !isUnifiedAlerting && !isExpressionQuery && (
<SavedQueryButtons
query={query}
app={app}

View File

@ -5,6 +5,7 @@ import { connect, ConnectedProps } from 'react-redux';
import { GrafanaTheme2 } from '@grafana/data';
import { Trans, t } from '@grafana/i18n';
import { config, getBackendSrv } from '@grafana/runtime';
import {
Avatar,
CellProps,
@ -65,6 +66,7 @@ export const TeamList = ({
changeSort,
}: Props) => {
const [roleOptions, setRoleOptions] = useState<Role[]>([]);
const [scimGroupSyncEnabled, setScimGroupSyncEnabled] = useState(false);
const styles = useStyles2(getStyles);
useEffect(() => {
@ -77,6 +79,25 @@ export const TeamList = ({
}
}, []);
useEffect(() => {
const checkSCIMSettings = async () => {
if (!config.featureToggles.enableSCIM) {
setScimGroupSyncEnabled(false);
return;
}
try {
const scimSettings = await getBackendSrv().get(
`/apis/scim.grafana.app/v0alpha1/namespaces/${config.namespace}/config`
);
setScimGroupSyncEnabled(scimSettings?.items[0]?.spec?.enableGroupSync || false);
} catch {
setScimGroupSyncEnabled(false);
}
};
checkSCIMSettings();
}, []);
const canCreate = contextSrv.hasPermission(AccessControlAction.ActionTeamsCreate);
const displayRolePicker = shouldDisplayRolePicker();
@ -198,7 +219,7 @@ export const TeamList = ({
const canReadTeam = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsRead, original);
const canDelete =
contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsDelete, original) &&
!original.isProvisioned;
(!scimGroupSyncEnabled || !original.isProvisioned);
return (
<Stack direction="row" justifyContent="flex-end" gap={2}>
{canReadTeam && (
@ -226,7 +247,7 @@ export const TeamList = ({
},
},
],
[displayRolePicker, hasFetched, rolesLoading, roleOptions, deleteTeam, styles]
[displayRolePicker, hasFetched, rolesLoading, roleOptions, deleteTeam, styles, scimGroupSyncEnabled]
);
return (

View File

@ -2,6 +2,7 @@
"type": "datasource",
"name": "PostgreSQL",
"id": "grafana-postgresql-datasource",
"executable": "gpx_grafana-postgresql-datasource",
"aliasIDs": ["postgres"],
"category": "sql",
@ -21,6 +22,9 @@
{ "name": "Documentation", "url": "https://grafana.com/docs/grafana/latest/datasources/postgres/" }
]
},
"dependencies": {
"grafanaDependency": ">=11.6.0"
},
"alerting": true,
"annotations": true,

View File

@ -1029,9 +1029,15 @@ export class GraphiteDatasource
};
if (config.featureToggles.graphiteBackendMode) {
try {
const functions = await this.getResource<string>('functions');
this.funcDefs = gfunc.parseFuncDefs(functions);
return this.funcDefs;
} catch (error) {
console.error('Fetching graphite functions error', error);
this.funcDefs = gfunc.getFuncDefs(this.graphiteVersion);
return this.funcDefs;
}
}
return lastValueFrom(

View File

@ -273,6 +273,15 @@ describe('interpolateQueryExpr', () => {
replace: jest.fn().mockImplementation((...rest: unknown[]) => 'templateVarReplaced'),
} as unknown as TemplateSrv;
let ds = getMockInfluxDS(getMockDSInstanceSettings(), templateSrvStub);
// Mock console.warn since the code under test is expected to call it
beforeEach(() => {
jest.spyOn(console, 'warn').mockImplementation();
});
afterEach(() => {
jest.restoreAllMocks();
});
it('should return the value as it is', () => {
const value = 'normalValue';
const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build();
@ -281,6 +290,18 @@ describe('interpolateQueryExpr', () => {
expect(result).toBe(expectation);
});
it('should return the escaped value if the value wrapped in regex without !~ or =~', () => {
const value = '/special/path';
const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build();
const result = ds.interpolateQueryExpr(
value,
variableMock,
'select atan(z/sqrt(3.14)), that where path /$tempVar/'
);
const expectation = `\\/special\\/path`;
expect(result).toBe(expectation);
});
it('should return the escaped value if the value wrapped in regex', () => {
const value = '/special/path';
const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build();

View File

@ -360,10 +360,7 @@ export default class InfluxDatasource extends DataSourceWithBackend<InfluxQuery,
// we want to see how it's been used. If it is used in a regex expression
// we escape it. Otherwise, we return it directly.
// The regex below searches for regexes within the query string
const regexMatcher = new RegExp(
/(\s*(=|!)~\s*)\/((?![*+?])(?:[^\r\n\[/\\]|\\.|\[(?:[^\r\n\]\\]|\\.)*\])+)\/((?:g(?:im?|mi?)?|i(?:gm?|mg?)?|m(?:gi?|ig?)?)?)/,
'gm'
);
const regexMatcher = new RegExp(/(?<=\/).+?(?=\/)/, 'gm');
// If matches are found this regex is evaluated to check if the variable is contained in the regex /^...$/ (^ and $ is optional)
// i.e. /^$myVar$/ or /$myVar/ or /^($myVar)$/
const regex = new RegExp(`\\/(?:\\^)?(.*)(\\$${variable.name})(.*)(?:\\$)?\\/`, 'gm');
@ -377,14 +374,22 @@ export default class InfluxDatasource extends DataSourceWithBackend<InfluxQuery,
if (!queryMatches) {
return value;
}
for (const match of queryMatches) {
if (!match.match(regex)) {
continue;
// Use the variable-specific regex against the query
if (!query.match(regex)) {
return value;
}
for (const match of queryMatches) {
// The RegExp is expected to be valid, but since our matcher captures any text between two '/'
// characters, we also verify that the expression compiles before treating it as a regular expression.
try {
new RegExp(match);
// If the value is a string array first escape them then join them with pipe
// then put inside parenthesis.
return typeof value === 'string' ? escapeRegex(value) : `(${value.map((v) => escapeRegex(v)).join('|')})`;
} catch (e) {
console.warn(`Supplied match is not valid regex: ${match}`);
}
}
return value;

View File

@ -1,6 +1,5 @@
import { css, cx } from '@emotion/css';
import { useMemo } from 'react';
import AutoSizer from 'react-virtualized-auto-sizer';
import { GrafanaTheme2, StandardEditorProps } from '@grafana/data';
import {
@ -25,26 +24,17 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp
return (
<div className={cx(styles.editorBox)}>
<AutoSizer disableHeight>
{({ width }) => {
if (width === 0) {
return null;
}
return (
<CodeEditor
value={value}
onBlur={onChange}
onSave={onChange}
language={language}
width={width}
width="100%"
showMiniMap={false}
showLineNumbers={false}
height="500px"
getSuggestions={getSuggestions}
/>
);
}}
</AutoSizer>
</div>
);
};
@ -52,8 +42,6 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp
const getStyles = (theme: GrafanaTheme2) => ({
editorBox: css({
label: 'editorBox',
border: `1px solid ${theme.colors.border.medium}`,
borderRadius: theme.shape.radius.default,
margin: theme.spacing(0.5, 0),
width: '100%',
}),

View File

@ -10228,7 +10228,7 @@
"title": "Zdroje dat"
},
"databases": {
"title": "Databáze"
"title": ""
},
"datasources": {
"subtitle": "Přidávejte a konfigurujte zdroje dat",
@ -11772,6 +11772,8 @@
"button-cancelling": "",
"button-next": "Dokončit",
"button-start": "Zahájit synchronizaci",
"check-status-button": "",
"check-status-message": "",
"discard-modal": {
"body": "Tímto odstraníte konfiguraci úložiště a přijdete o veškerý pokrok. Opravdu chcete zahodit změny?",
"confirm": "Ano, zahodit",

View File

@ -10166,7 +10166,7 @@
"title": "Datenquellen"
},
"databases": {
"title": "Datenbanken"
"title": ""
},
"datasources": {
"subtitle": "Füge Datenquellen hinzu und konfiguriere sie",
@ -11692,6 +11692,8 @@
"button-cancelling": "",
"button-next": "Fertigstellen",
"button-start": "Synchronisierung starten",
"check-status-button": "",
"check-status-message": "",
"discard-modal": {
"body": "Dadurch wird die Repository-Konfiguration gelöscht und Sie verlieren alle Fortschritte. Sind Sie sicher, dass Sie Ihre Änderungen verwerfen möchten?",
"confirm": "Ja, verwerfen",

View File

@ -3496,6 +3496,7 @@
"move-modal-field-label": "Folder name",
"move-modal-text": "This action will move the following content:",
"move-modal-title": "Move",
"move-provisioned-folder": "Move provisioned folder",
"moving": "Moving...",
"new-folder-name-required-phrase": "Folder name is required.",
"selected-mix-resources-modal-text": "You have selected both provisioned and non-provisioned resources. These cannot be processed together. Please select only provisioned resources or only non-provisioned resources and try again.",

View File

@ -10166,7 +10166,7 @@
"title": "Orígenes de datos"
},
"databases": {
"title": "Bases de datos"
"title": ""
},
"datasources": {
"subtitle": "Añadir y configurar orígenes de datos",
@ -11692,6 +11692,8 @@
"button-cancelling": "",
"button-next": "Terminar",
"button-start": "Iniciar la sincronización",
"check-status-button": "",
"check-status-message": "",
"discard-modal": {
"body": "Esto eliminará la configuración del repositorio y se perderá todo el progreso. ¿Seguro que quieres descartar los cambios?",
"confirm": "Sí, descartar",

View File

@ -10166,7 +10166,7 @@
"title": "Sources de données"
},
"databases": {
"title": "Bases de données"
"title": ""
},
"datasources": {
"subtitle": "Ajouter et configurer des sources de données",
@ -11692,6 +11692,8 @@
"button-cancelling": "",
"button-next": "Terminer",
"button-start": "Commencer la synchronisation",
"check-status-button": "",
"check-status-message": "",
"discard-modal": {
"body": "Cette action supprimera la configuration du dépôt et vous perdrez toutes vos modifications. Voulez-vous vraiment annuler vos modifications ?",
"confirm": "Oui, annuler",

View File

@ -10166,7 +10166,7 @@
"title": "Adatforrások"
},
"databases": {
"title": "Adatbázisok"
"title": ""
},
"datasources": {
"subtitle": "Adatforrások hozzáadása és konfigurálása",
@ -11692,6 +11692,8 @@
"button-cancelling": "",
"button-next": "Befejezés",
"button-start": "Szinkronizálás indítása",
"check-status-button": "",
"check-status-message": "",
"discard-modal": {
"body": "Ez törli az adattár konfigurációját, és az összes előrehaladás el fog veszni. Biztosan elveti a módosításokat?",
"confirm": "Igen, elvetés",

Some files were not shown because too many files have changed in this diff